-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd6
-rw-r--r--Documentation/DMA-API-HOWTO.txt153
-rw-r--r--Documentation/DMA-API.txt580
-rw-r--r--Documentation/DMA-ISA-LPC.txt73
-rw-r--r--Documentation/DMA-attributes.txt15
-rw-r--r--Documentation/IPMI.txt76
-rw-r--r--Documentation/IRQ-affinity.txt75
-rw-r--r--Documentation/IRQ-domain.txt69
-rw-r--r--Documentation/IRQ.txt2
-rw-r--r--Documentation/Intel-IOMMU.txt37
-rw-r--r--Documentation/SAK.txt65
-rw-r--r--Documentation/SM501.txt9
-rw-r--r--Documentation/bcache.txt190
-rw-r--r--Documentation/bt8xxgpio.txt19
-rw-r--r--Documentation/btmrvl.txt65
-rw-r--r--Documentation/bus-virt-phys-mapping.txt64
-rw-r--r--Documentation/cachetlb.txt92
-rw-r--r--Documentation/cgroup-v2.txt460
-rw-r--r--Documentation/circular-buffers.txt51
-rw-r--r--Documentation/clk.txt189
-rw-r--r--Documentation/core-api/kernel-api.rst2
-rw-r--r--Documentation/cpu-load.txt131
-rw-r--r--Documentation/cputopology.txt37
-rw-r--r--Documentation/crc32.txt75
-rw-r--r--Documentation/crypto/asymmetric-keys.txt65
-rw-r--r--Documentation/dcdbas.txt24
-rw-r--r--Documentation/debugging-via-ohci1394.txt21
-rw-r--r--Documentation/dell_rbu.txt81
-rw-r--r--Documentation/devicetree/bindings/clock/img,boston-clock.txt31
-rw-r--r--Documentation/devicetree/bindings/mtd/denali-nand.txt13
-rw-r--r--Documentation/devicetree/bindings/mtd/elm.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-nor.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-onenand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmi-nand.txt14
-rw-r--r--Documentation/devicetree/bindings/mtd/microchip,mchp23k256.txt18
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-nand.txt5
-rw-r--r--Documentation/devicetree/bindings/mtd/nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt32
-rw-r--r--Documentation/devicetree/bindings/net/brcm,amac.txt1
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt24
-rw-r--r--Documentation/devicetree/bindings/net/gpmc-eth.txt4
-rw-r--r--Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt15
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-meson.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-stm32.txt2
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt22
-rw-r--r--Documentation/devicetree/bindings/rtc/cortina,gemini.txt14
-rw-r--r--Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt28
-rw-r--r--Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt32
-rw-r--r--Documentation/digsig.txt131
-rw-r--r--Documentation/driver-api/basics.rst3
-rw-r--r--Documentation/efi-stub.txt25
-rw-r--r--Documentation/eisa.txt273
-rw-r--r--Documentation/fault-injection/fault-injection.txt79
-rw-r--r--Documentation/filesystems/proc.txt6
-rw-r--r--Documentation/filesystems/vfs.txt6
-rw-r--r--Documentation/flexible-arrays.txt25
-rw-r--r--Documentation/futex-requeue-pi.txt93
-rw-r--r--Documentation/gcc-plugins.txt58
-rw-r--r--Documentation/highuid.txt47
-rw-r--r--Documentation/hw_random.txt159
-rw-r--r--Documentation/hwspinlock.txt527
-rw-r--r--Documentation/input/index.rst1
-rw-r--r--Documentation/intel_txt.txt63
-rw-r--r--Documentation/io-mapping.txt67
-rw-r--r--Documentation/io_ordering.txt62
-rw-r--r--Documentation/iostats.txt76
-rw-r--r--Documentation/irqflags-tracing.txt10
-rw-r--r--Documentation/isa.txt53
-rw-r--r--Documentation/isapnp.txt1
-rw-r--r--Documentation/kdump/kdump.txt12
-rw-r--r--Documentation/kernel-per-CPU-kthreads.txt156
-rw-r--r--Documentation/kobject.txt69
-rw-r--r--Documentation/kprobes.txt475
-rw-r--r--Documentation/kref.txt295
-rw-r--r--Documentation/ldm.txt56
-rw-r--r--Documentation/lockup-watchdogs.txt3
-rw-r--r--Documentation/lzo.txt27
-rw-r--r--Documentation/mailbox.txt185
-rw-r--r--Documentation/memory-barriers.txt6
-rw-r--r--Documentation/memory-hotplug.txt355
-rw-r--r--Documentation/men-chameleon-bus.txt330
-rw-r--r--Documentation/nommu-mmap.txt68
-rw-r--r--Documentation/ntb.txt157
-rw-r--r--Documentation/numastat.txt5
-rw-r--r--Documentation/padata.txt27
-rw-r--r--Documentation/parport-lowlevel.txt1321
-rw-r--r--Documentation/percpu-rw-semaphore.txt3
-rw-r--r--Documentation/phy.txt106
-rw-r--r--Documentation/pi-futex.txt15
-rw-r--r--Documentation/pnp.txt343
-rw-r--r--Documentation/preempt-locking.txt40
-rw-r--r--Documentation/printk-formats.txt384
-rw-r--r--Documentation/pwm.txt46
-rw-r--r--Documentation/rbtree.txt88
-rw-r--r--Documentation/remoteproc.txt328
-rw-r--r--Documentation/rfkill.txt43
-rw-r--r--Documentation/robust-futex-ABI.txt14
-rw-r--r--Documentation/robust-futexes.txt12
-rw-r--r--Documentation/rpmsg.txt348
-rw-r--r--Documentation/rtc.txt46
-rw-r--r--Documentation/security/keys/core.rst6
-rw-r--r--Documentation/sgi-ioc4.txt4
-rw-r--r--Documentation/siphash.txt164
-rw-r--r--Documentation/smsc_ece1099.txt4
-rw-r--r--Documentation/static-keys.txt207
-rw-r--r--Documentation/svga.txt146
-rw-r--r--Documentation/tee.txt53
-rw-r--r--Documentation/this_cpu_ops.txt49
-rw-r--r--Documentation/trace/ftrace.txt508
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt14
-rw-r--r--Documentation/unaligned-memory-access.txt57
-rw-r--r--Documentation/vfio-mediated-device.txt266
-rw-r--r--Documentation/vfio.txt281
-rw-r--r--Documentation/virtual/kvm/api.txt18
-rw-r--r--Documentation/virtual/kvm/msr.txt5
-rw-r--r--Documentation/xillybus.txt29
-rw-r--r--Documentation/xz.txt200
-rw-r--r--Documentation/zorro.txt59
-rw-r--r--MAINTAINERS56
-rw-r--r--Makefile30
-rw-r--r--arch/Kconfig31
-rw-r--r--arch/arc/include/asm/Kbuild24
-rw-r--r--arch/arc/include/uapi/asm/Kbuild24
-rw-r--r--arch/arm/include/asm/Kbuild16
-rw-r--r--arch/arm/include/asm/cacheflush.h2
-rw-r--r--arch/arm/include/asm/flat.h25
-rw-r--r--arch/arm/include/asm/kvm_hyp.h8
-rw-r--r--arch/arm/include/asm/uaccess.h7
-rw-r--r--arch/arm/include/uapi/asm/Kbuild14
-rw-r--r--arch/arm/kernel/bios32.c2
-rw-r--r--arch/arm/mach-sa1100/jornada720_ssp.c2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/Kbuild17
-rw-r--r--arch/arm64/include/asm/stackprotector.h1
-rw-r--r--arch/arm64/include/asm/string.h5
-rw-r--r--arch/arm64/include/asm/uaccess.h4
-rw-r--r--arch/arm64/include/uapi/asm/Kbuild16
-rw-r--r--arch/arm64/mm/mmap.c7
-rw-r--r--arch/blackfin/include/asm/Kbuild24
-rw-r--r--arch/blackfin/include/asm/flat.h28
-rw-r--r--arch/blackfin/include/asm/nmi.h2
-rw-r--r--arch/blackfin/include/uapi/asm/Kbuild22
-rw-r--r--arch/blackfin/kernel/flat.c17
-rw-r--r--arch/blackfin/kernel/nmi.c2
-rw-r--r--arch/c6x/include/asm/Kbuild28
-rw-r--r--arch/c6x/include/asm/flat.h15
-rw-r--r--arch/c6x/include/uapi/asm/Kbuild25
-rw-r--r--arch/cris/arch-v10/drivers/gpio.c4
-rw-r--r--arch/cris/include/asm/Kbuild21
-rw-r--r--arch/cris/include/uapi/asm/Kbuild17
-rw-r--r--arch/frv/include/asm/tlbflush.h8
-rw-r--r--arch/h8300/include/asm/Kbuild30
-rw-r--r--arch/h8300/include/asm/flat.h24
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild26
-rw-r--r--arch/hexagon/include/asm/Kbuild24
-rw-r--r--arch/hexagon/include/uapi/asm/Kbuild22
-rw-r--r--arch/ia64/include/asm/Kbuild2
-rw-r--r--arch/ia64/include/asm/uaccess.h36
-rw-r--r--arch/ia64/kernel/machine_kexec.c5
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_ate.c2
-rw-r--r--arch/ia64/sn/pci/tioce_provider.c4
-rw-r--r--arch/m32r/include/asm/Kbuild4
-rw-r--r--arch/m32r/include/asm/flat.h19
-rw-r--r--arch/m32r/include/uapi/asm/Kbuild3
-rw-r--r--arch/m68k/coldfire/intc-simr.c4
-rw-r--r--arch/m68k/include/asm/Kbuild13
-rw-r--r--arch/m68k/include/asm/flat.h26
-rw-r--r--arch/m68k/include/asm/uaccess.h7
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild10
-rw-r--r--arch/metag/include/asm/Kbuild26
-rw-r--r--arch/metag/include/uapi/asm/Kbuild24
-rw-r--r--arch/microblaze/include/asm/Kbuild25
-rw-r--r--arch/microblaze/include/asm/flat.h34
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild25
-rw-r--r--arch/mips/Kconfig114
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/boot/dts/img/Makefile5
-rw-r--r--arch/mips/boot/dts/img/boston.dts224
-rw-r--r--arch/mips/boot/dts/mti/sead3.dts24
-rw-r--r--arch/mips/configs/ar7_defconfig6
-rw-r--r--arch/mips/configs/ath79_defconfig2
-rw-r--r--arch/mips/configs/bcm63xx_defconfig7
-rw-r--r--arch/mips/configs/bigsur_defconfig4
-rw-r--r--arch/mips/configs/bmips_be_defconfig1
-rw-r--r--arch/mips/configs/capcella_defconfig4
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig1
-rw-r--r--arch/mips/configs/ci20_defconfig1
-rw-r--r--arch/mips/configs/cobalt_defconfig8
-rw-r--r--arch/mips/configs/decstation_defconfig1
-rw-r--r--arch/mips/configs/e55_defconfig2
-rw-r--r--arch/mips/configs/fuloong2e_defconfig11
-rw-r--r--arch/mips/configs/generic/board-boston.config48
-rw-r--r--arch/mips/configs/gpr_defconfig8
-rw-r--r--arch/mips/configs/ip22_defconfig11
-rw-r--r--arch/mips/configs/ip27_defconfig5
-rw-r--r--arch/mips/configs/ip28_defconfig5
-rw-r--r--arch/mips/configs/ip32_defconfig8
-rw-r--r--arch/mips/configs/jazz_defconfig6
-rw-r--r--arch/mips/configs/jmr3927_defconfig7
-rw-r--r--arch/mips/configs/lasat_defconfig5
-rw-r--r--arch/mips/configs/lemote2f_defconfig12
-rw-r--r--arch/mips/configs/loongson3_defconfig3
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig1
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig1
-rw-r--r--arch/mips/configs/maltaaprp_defconfig2
-rw-r--r--arch/mips/configs/maltasmvp_defconfig1
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig2
-rw-r--r--arch/mips/configs/maltaup_defconfig2
-rw-r--r--arch/mips/configs/markeins_defconfig4
-rw-r--r--arch/mips/configs/mips_paravirt_defconfig1
-rw-r--r--arch/mips/configs/mpc30x_defconfig5
-rw-r--r--arch/mips/configs/msp71xx_defconfig2
-rw-r--r--arch/mips/configs/mtx1_defconfig11
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig5
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig9
-rw-r--r--arch/mips/configs/pnx8335_stb225_defconfig7
-rw-r--r--arch/mips/configs/qi_lb60_defconfig3
-rw-r--r--arch/mips/configs/rb532_defconfig6
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig7
-rw-r--r--arch/mips/configs/rm200_defconfig8
-rw-r--r--arch/mips/configs/rt305x_defconfig2
-rw-r--r--arch/mips/configs/sb1250_swarm_defconfig1
-rw-r--r--arch/mips/configs/tb0219_defconfig6
-rw-r--r--arch/mips/configs/tb0226_defconfig7
-rw-r--r--arch/mips/configs/tb0287_defconfig5
-rw-r--r--arch/mips/configs/workpad_defconfig4
-rw-r--r--arch/mips/generic/Kconfig20
-rw-r--r--arch/mips/generic/Makefile1
-rw-r--r--arch/mips/generic/board-sead3.c234
-rw-r--r--arch/mips/generic/init.c27
-rw-r--r--arch/mips/generic/vmlinux.its.S25
-rw-r--r--arch/mips/generic/yamon-dt.c240
-rw-r--r--arch/mips/include/asm/Kbuild2
-rw-r--r--arch/mips/include/asm/branch.h5
-rw-r--r--arch/mips/include/asm/cmpxchg.h280
-rw-r--r--arch/mips/include/asm/cpu-features.h44
-rw-r--r--arch/mips/include/asm/cpu-type.h1
-rw-r--r--arch/mips/include/asm/cpu.h9
-rw-r--r--arch/mips/include/asm/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-dec/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-generic/mc146818rtc.h2
-rw-r--r--arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-loongson64/boot_param.h22
-rw-r--r--arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-rm/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/machine.h31
-rw-r--r--arch/mips/include/asm/mipsregs.h1
-rw-r--r--arch/mips/include/asm/module.h8
-rw-r--r--arch/mips/include/asm/pgalloc.h2
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/spinlock.h426
-rw-r--r--arch/mips/include/asm/spinlock_types.h34
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/include/asm/uaccess.h277
-rw-r--r--arch/mips/include/asm/uasm.h30
-rw-r--r--arch/mips/include/asm/vdso.h4
-rw-r--r--arch/mips/include/asm/yamon-dt.h64
-rw-r--r--arch/mips/include/uapi/asm/inst.h20
-rw-r--r--arch/mips/kernel/Makefile3
-rw-r--r--arch/mips/kernel/branch.c73
-rw-r--r--arch/mips/kernel/cmpxchg.c109
-rw-r--r--arch/mips/kernel/cps-vec.S7
-rw-r--r--arch/mips/kernel/cpu-probe.c24
-rw-r--r--arch/mips/kernel/mips-cm.c40
-rw-r--r--arch/mips/kernel/module-rela.c202
-rw-r--r--arch/mips/kernel/module.c221
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c14
-rw-r--r--arch/mips/kernel/proc.c3
-rw-r--r--arch/mips/kernel/ptrace.c31
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c40
-rw-r--r--arch/mips/kernel/smp-cps.c35
-rw-r--r--arch/mips/kernel/smp.c3
-rw-r--r--arch/mips/kernel/syscall.c19
-rw-r--r--arch/mips/kernel/unaligned.c213
-rw-r--r--arch/mips/lib/memcpy.S3
-rw-r--r--arch/mips/loongson64/common/env.c12
-rw-r--r--arch/mips/loongson64/common/init.c13
-rw-r--r--arch/mips/loongson64/loongson-3/irq.c58
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c23
-rw-r--r--arch/mips/math-emu/cp1emu.c43
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c41
-rw-r--r--arch/mips/mm/uasm-micromips.c188
-rw-r--r--arch/mips/mm/uasm-mips.c238
-rw-r--r--arch/mips/mm/uasm.c61
-rw-r--r--arch/mips/net/Makefile3
-rw-r--r--arch/mips/vdso/gettimeofday.c59
-rw-r--r--arch/mn10300/include/asm/nmi.h2
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog-low.S8
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog.c2
-rw-r--r--arch/nios2/include/asm/Kbuild26
-rw-r--r--arch/nios2/include/asm/signal.h22
-rw-r--r--arch/nios2/include/uapi/asm/Kbuild23
-rw-r--r--arch/openrisc/include/asm/Kbuild29
-rw-r--r--arch/openrisc/include/uapi/asm/Kbuild27
-rw-r--r--arch/parisc/include/asm/Kbuild7
-rw-r--r--arch/parisc/include/asm/uaccess.h1
-rw-r--r--arch/parisc/include/uapi/asm/Kbuild4
-rw-r--r--arch/powerpc/Kconfig7
-rw-r--r--arch/powerpc/include/asm/atomic.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/nmi.h11
-rw-r--r--arch/powerpc/include/asm/opal-api.h9
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h3
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S13
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c16
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S30
-rw-r--r--arch/powerpc/kernel/fadump.c3
-rw-r--r--arch/powerpc/kernel/kvm.c7
-rw-r--r--arch/powerpc/kernel/mce_power.c56
-rw-r--r--arch/powerpc/kernel/misc_64.S12
-rw-r--r--arch/powerpc/kernel/prom_init.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c19
-rw-r--r--arch/powerpc/kernel/smp.c20
-rw-r--r--arch/powerpc/kernel/watchdog.c386
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/lib/feature-fixups.c180
-rw-r--r--arch/powerpc/lib/sstep.c19
-rw-r--r--arch/powerpc/mm/mmap.c28
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c20
-rw-r--r--arch/powerpc/perf/isa207-common.c6
-rw-r--r--arch/powerpc/perf/power9-events-list.h4
-rw-r--r--arch/powerpc/perf/power9-pmu.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c22
-rw-r--r--arch/powerpc/platforms/powernv/opal.c19
-rw-r--r--arch/powerpc/platforms/powernv/setup.c11
-rw-r--r--arch/s390/include/asm/uaccess.h3
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/setup.c6
-rw-r--r--arch/sh/include/asm/Kbuild19
-rw-r--r--arch/sh/include/asm/bug.h1
-rw-r--r--arch/sh/include/asm/flat.h15
-rw-r--r--arch/sh/include/asm/stackprotector.h1
-rw-r--r--arch/sh/include/uapi/asm/Kbuild18
-rw-r--r--arch/sh/mm/cache-sh5.c2
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/nmi.h1
-rw-r--r--arch/sparc/include/asm/trap_block.h1
-rw-r--r--arch/sparc/include/asm/uaccess_64.h1
-rw-r--r--arch/sparc/include/uapi/asm/Kbuild2
-rw-r--r--arch/sparc/kernel/mdesc.c2
-rw-r--r--arch/sparc/kernel/nmi.c6
-rw-r--r--arch/sparc/kernel/pci_sun4v.c12
-rw-r--r--arch/sparc/kernel/smp_64.c185
-rw-r--r--arch/sparc/kernel/sun4v_ivec.S15
-rw-r--r--arch/sparc/kernel/traps_64.c1
-rw-r--r--arch/tile/include/asm/Kbuild19
-rw-r--r--arch/tile/include/asm/uaccess.h1
-rw-r--r--arch/tile/include/uapi/arch/abi.h49
-rw-r--r--arch/tile/include/uapi/arch/intreg.h70
-rw-r--r--arch/tile/include/uapi/asm/Kbuild19
-rw-r--r--arch/tile/mm/init.c30
-rw-r--r--arch/um/Makefile4
-rw-r--r--arch/um/drivers/stdio_console.c3
-rw-r--r--arch/um/include/asm/common.lds.S2
-rw-r--r--arch/um/include/asm/io.h17
-rw-r--r--arch/um/include/shared/os.h4
-rw-r--r--arch/um/include/shared/skas/stub-data.h2
-rw-r--r--arch/um/kernel/physmem.c10
-rw-r--r--arch/um/kernel/trap.c10
-rw-r--r--arch/um/kernel/um_arch.c16
-rw-r--r--arch/um/kernel/umid.c4
-rw-r--r--arch/um/os-Linux/execvp.c2
-rw-r--r--arch/um/os-Linux/main.c9
-rw-r--r--arch/um/os-Linux/mem.c28
-rw-r--r--arch/um/os-Linux/skas/process.c41
-rw-r--r--arch/um/os-Linux/start_up.c28
-rw-r--r--arch/um/os-Linux/umid.c19
-rw-r--r--arch/um/os-Linux/util.c34
-rw-r--r--arch/unicore32/include/asm/Kbuild30
-rw-r--r--arch/unicore32/include/uapi/asm/Kbuild27
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/boot/compressed/misc.c5
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c2
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/efi.h4
-rw-r--r--arch/x86/include/asm/kprobes.h8
-rw-r--r--arch/x86/include/asm/kvm_emulate.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h11
-rw-r--r--arch/x86/include/asm/paravirt_types.h16
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/stackprotector.h1
-rw-r--r--arch/x86/include/asm/string_32.h9
-rw-r--r--arch/x86/include/asm/string_64.h7
-rw-r--r--arch/x86/include/asm/svm.h5
-rw-r--r--arch/x86/include/asm/uaccess.h3
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c2
-rw-r--r--arch/x86/kernel/crash.c2
-rw-r--r--arch/x86/kernel/kvm.c7
-rw-r--r--arch/x86/kvm/hyperv.c67
-rw-r--r--arch/x86/kvm/hyperv.h3
-rw-r--r--arch/x86/kvm/i8254.c2
-rw-r--r--arch/x86/kvm/mmu.c35
-rw-r--r--arch/x86/kvm/mmu.h3
-rw-r--r--arch/x86/kvm/svm.c151
-rw-r--r--arch/x86/kvm/vmx.c162
-rw-r--r--arch/x86/kvm/x86.c43
-rw-r--r--arch/x86/lib/memcpy_32.c2
-rw-r--r--arch/x86/mm/mmap.c7
-rw-r--r--arch/x86/um/os-Linux/registers.c12
-rw-r--r--arch/x86/um/setjmp_32.S16
-rw-r--r--arch/x86/um/setjmp_64.S16
-rw-r--r--arch/x86/um/user-offsets.c6
-rw-r--r--arch/x86/xen/mmu_pv.c4
-rw-r--r--arch/xtensa/include/asm/Kbuild11
-rw-r--r--arch/xtensa/include/asm/fb.h12
-rw-r--r--arch/xtensa/include/asm/flat.h15
-rw-r--r--arch/xtensa/include/uapi/asm/Kbuild9
-rw-r--r--certs/Makefile6
-rw-r--r--crypto/af_alg.c2
-rw-r--r--drivers/acpi/ec.c21
-rw-r--r--drivers/acpi/irq.c4
-rw-r--r--drivers/acpi/nfit/core.c10
-rw-r--r--drivers/acpi/x86/utils.c41
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/base/power/domain.c8
-rw-r--r--drivers/base/power/sysfs.c2
-rw-r--r--drivers/base/property.c6
-rw-r--r--drivers/base/regmap/regmap-w1.c4
-rw-r--r--drivers/char/random.c101
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/imgtec/Kconfig9
-rw-r--r--drivers/clk/imgtec/Makefile1
-rw-r--r--drivers/clk/imgtec/clk-boston.c103
-rw-r--r--drivers/cpufreq/arm_big_little.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c13
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c23
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c4
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c3
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/caam/caamalg.c20
-rw-r--r--drivers/crypto/caam/caamhash.c2
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c8
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c23
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/dax/device-dax.h2
-rw-r--r--drivers/dax/device.c33
-rw-r--r--drivers/dax/pmem.c12
-rw-r--r--drivers/devfreq/governor_userspace.c2
-rw-r--r--drivers/devfreq/rk3399_dmc.c5
-rw-r--r--drivers/devfreq/tegra-devfreq.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/panel.c2
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c109
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c1
-rw-r--r--drivers/gpu/drm/drm_ioc32.c3
-rw-r--r--drivers/gpu/drm/drm_vblank.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c38
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c12
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c20
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c14
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c6
-rw-r--r--drivers/gpu/drm/mediatek/Makefile3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c176
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c80
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c33
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c27
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c13
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c5
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-multitouch.c16
-rw-r--r--drivers/infiniband/core/addr.c46
-rw-r--r--drivers/infiniband/core/cma.c32
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c11
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c23
-rw-r--r--drivers/infiniband/core/verbs.c51
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c7
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c7
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c86
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c40
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c36
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c48
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c31
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c11
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4
-rw-r--r--drivers/input/keyboard/gpio_keys.c17
-rw-r--r--drivers/input/misc/xen-kbdfront.c219
-rw-r--r--drivers/input/serio/i8042.c12
-rw-r--r--drivers/isdn/divert/isdn_divert.c25
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c2
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c4
-rw-r--r--drivers/mmc/core/block.c6
-rw-r--r--drivers/mmc/host/sdhci-acpi.c70
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c19
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/bcm47xxpart.c99
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c2
-rw-r--r--drivers/mtd/devices/Kconfig10
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/m25p80.c121
-rw-r--r--drivers/mtd/devices/mchp23k256.c236
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c200
-rw-r--r--drivers/mtd/devices/serial_flash_cmds.h1
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c4
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.c2
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdpart.c370
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c354
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/davinci_nand.c3
-rw-r--r--drivers/mtd/nand/denali.c1831
-rw-r--r--drivers/mtd/nand/denali.h315
-rw-r--r--drivers/mtd/nand/denali_dt.c53
-rw-r--r--drivers/mtd/nand/denali_pci.c26
-rw-r--r--drivers/mtd/nand/docg4.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c81
-rw-r--r--drivers/mtd/nand/fsmc_nand.c122
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c75
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h13
-rw-r--r--drivers/mtd/nand/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/jz4780_nand.c2
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/mtk_ecc.c228
-rw-r--r--drivers/mtd/nand/mtk_ecc.h2
-rw-r--r--drivers/mtd/nand/mtk_nand.c279
-rw-r--r--drivers/mtd/nand/mxc_nand.c12
-rw-r--r--drivers/mtd/nand/nand_base.c349
-rw-r--r--drivers/mtd/nand/nand_micron.c222
-rw-r--r--drivers/mtd/nand/orion_nand.c6
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c2
-rw-r--r--drivers/mtd/nand/qcom_nandc.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c5
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c9
-rw-r--r--drivers/mtd/nand/tango_nand.c22
-rw-r--r--drivers/mtd/nand/vf610_nfc.c2
-rw-r--r--drivers/mtd/parsers/Kconfig8
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/parser_trx.c126
-rw-r--r--drivers/mtd/spi-nor/Kconfig2
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c183
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c83
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c20
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c6
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c31
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c7
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c15
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c22
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c488
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c32
-rw-r--r--drivers/mtd/tests/subpagetest.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c70
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c299
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c10
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c49
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/phy/mdio-mux.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c30
-rw-r--r--drivers/net/usb/cdc_ncm.c28
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c6
-rw-r--r--drivers/net/usb/smsc95xx.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c139
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h3
-rw-r--r--drivers/ntb/hw/idt/Kconfig31
-rw-r--r--drivers/ntb/hw/idt/Makefile1
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c2712
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.h1149
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c298
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h3
-rw-r--r--drivers/ntb/ntb.c69
-rw-r--r--drivers/ntb/ntb_transport.c42
-rw-r--r--drivers/ntb/test/ntb_perf.c109
-rw-r--r--drivers/ntb/test/ntb_pingpong.c14
-rw-r--r--drivers/ntb/test/ntb_tool.c69
-rw-r--r--drivers/nvdimm/core.c7
-rw-r--r--drivers/pci/host/pcie-rockchip.c2
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci.c16
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/pme.c35
-rw-r--r--drivers/platform/mips/cpu_hwmon.c136
-rw-r--r--drivers/platform/x86/alienware-wmi.c6
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c14
-rw-r--r--drivers/platform/x86/ideapad-laptop.c85
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c33
-rw-r--r--drivers/platform/x86/panasonic-laptop.c2
-rw-r--r--drivers/platform/x86/peaq-wmi.c2
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/silead_dmi.c32
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/power/supply/twl4030_charger.c43
-rw-r--r--drivers/pwm/core.c4
-rw-r--r--drivers/pwm/pwm-bfin.c4
-rw-r--r--drivers/pwm/pwm-cros-ec.c4
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-meson.c48
-rw-r--r--drivers/pwm/pwm-sun4i.c263
-rw-r--r--drivers/pwm/pwm-tegra.c18
-rw-r--r--drivers/rtc/Kconfig37
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/class.c202
-rw-r--r--drivers/rtc/interface.c9
-rw-r--r--drivers/rtc/nvmem.c113
-rw-r--r--drivers/rtc/rtc-at91rm9200.c14
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c330
-rw-r--r--drivers/rtc/rtc-core.h8
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c957
-rw-r--r--drivers/rtc/rtc-ds3232.c119
-rw-r--r--drivers/rtc/rtc-ftrtc010.c (renamed from drivers/rtc/rtc-gemini.c)119
-rw-r--r--drivers/rtc/rtc-m41t80.c251
-rw-r--r--drivers/rtc/rtc-mxc.c11
-rw-r--r--drivers/rtc/rtc-nuc900.c2
-rw-r--r--drivers/rtc/rtc-opal.c32
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-rv8803.c72
-rw-r--r--drivers/rtc/rtc-s3c.c147
-rw-r--r--drivers/rtc/rtc-st-lpc.c19
-rw-r--r--drivers/rtc/rtc-stm32.c82
-rw-r--r--drivers/rtc/rtc-sysfs.c3
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/cxlflash/main.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c10
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c156
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h25
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.h5
-rw-r--r--drivers/scsi/isci/request.c14
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi.h17
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c419
-rw-r--r--drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h210
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c40
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c41
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c60
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/loopback/tcm_loop.c77
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/target_core_alua.c8
-rw-r--r--drivers/target/target_core_configfs.c30
-rw-r--r--drivers/target/target_core_device.c145
-rw-r--r--drivers/target/target_core_fabric_configfs.c25
-rw-r--r--drivers/target/target_core_fabric_lib.c6
-rw-r--r--drivers/target/target_core_file.c7
-rw-r--r--drivers/target/target_core_iblock.c52
-rw-r--r--drivers/target/target_core_internal.h5
-rw-r--r--drivers/target/target_core_pr.c109
-rw-r--r--drivers/target/target_core_pscsi.c82
-rw-r--r--drivers/target/target_core_pscsi.h4
-rw-r--r--drivers/target/target_core_rd.c11
-rw-r--r--drivers/target/target_core_sbc.c65
-rw-r--r--drivers/target/target_core_spc.c42
-rw-r--r--drivers/target/target_core_tmr.c18
-rw-r--r--drivers/target/target_core_tpg.c1
-rw-r--r--drivers/target/target_core_transport.c224
-rw-r--r--drivers/target/target_core_user.c447
-rw-r--r--drivers/target/target_core_xcopy.c184
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c1
-rw-r--r--drivers/thermal/cpu_cooling.c609
-rw-r--r--drivers/thermal/fair_share.c1
-rw-r--r--drivers/thermal/hisi_thermal.c5
-rw-r--r--drivers/thermal/imx_thermal.c27
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c6
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c12
-rw-r--r--drivers/thermal/step_wise.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c22
-rw-r--r--drivers/thermal/user_space.c3
-rw-r--r--drivers/tty/serial/ioc3_serial.c4
-rw-r--r--drivers/tty/serial/ioc4_serial.c4
-rw-r--r--drivers/usb/serial/safe_serial.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/vfio.c86
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/scsi.c13
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/console/mdacon.c89
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c10
-rw-r--r--drivers/video/fbdev/core/fbmem.c5
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c4
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/omap/lcdc.c6
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c14
-rw-r--r--drivers/video/fbdev/pxafb.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c148
-rw-r--r--drivers/video/fbdev/vermilion/cr_pll.c1
-rw-r--r--drivers/xen/xen-scsiback.c36
-rw-r--r--fs/9p/v9fs.c61
-rw-r--r--fs/9p/v9fs.h3
-rw-r--r--fs/9p/vfs_super.c6
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/affs/super.c42
-rw-r--r--fs/afs/super.c45
-rw-r--r--fs/befs/btree.c15
-rw-r--r--fs/befs/linuxvfs.c24
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/binfmt_flat.c41
-rw-r--r--fs/btrfs/compression.c1
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/extent_io.c19
-rw-r--r--fs/btrfs/extent_io.h4
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/btrfs/raid56.c26
-rw-r--r--fs/btrfs/send.c88
-rw-r--r--fs/btrfs/super.c1
-rw-r--r--fs/ceph/dir.c5
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/dcache.c8
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/efivarfs/super.c1
-rw-r--r--fs/eventpoll.c52
-rw-r--r--fs/ext2/acl.c43
-rw-r--r--fs/ext2/inode.c1
-rw-r--r--fs/f2fs/acl.c2
-rw-r--r--fs/f2fs/checkpoint.c10
-rw-r--r--fs/f2fs/file.c5
-rw-r--r--fs/f2fs/sysfs.c1
-rw-r--r--fs/filesystems.c3
-rw-r--r--fs/fs-writeback.c8
-rw-r--r--fs/gfs2/dir.c3
-rw-r--r--fs/hfsplus/posix_acl.c30
-rw-r--r--fs/hugetlbfs/inode.c70
-rw-r--r--fs/iomap.c8
-rw-r--r--fs/isofs/inode.c59
-rw-r--r--fs/isofs/isofs.h3
-rw-r--r--fs/lockd/clnt4xdr.c34
-rw-r--r--fs/lockd/clntxdr.c58
-rw-r--r--fs/lockd/mon.c38
-rw-r--r--fs/lockd/svc.c38
-rw-r--r--fs/lockd/svc4proc.c124
-rw-r--r--fs/lockd/svcproc.c124
-rw-r--r--fs/lockd/xdr.c43
-rw-r--r--fs/lockd/xdr4.c43
-rw-r--r--fs/mount.h4
-rw-r--r--fs/namei.c5
-rw-r--r--fs/namespace.c63
-rw-r--r--fs/nfs/Makefile2
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/callback.h27
-rw-r--r--fs/nfs/callback_proc.c33
-rw-r--r--fs/nfs/callback_xdr.c113
-rw-r--r--fs/nfs/dir.c42
-rw-r--r--fs/nfs/export.c177
-rw-r--r--fs/nfs/filelayout/filelayout.c31
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c33
-rw-r--r--fs/nfs/inode.c36
-rw-r--r--fs/nfs/internal.h18
-rw-r--r--fs/nfs/mount_clnt.c29
-rw-r--r--fs/nfs/nfs2xdr.c72
-rw-r--r--fs/nfs/nfs3proc.c2
-rw-r--r--fs/nfs/nfs3xdr.c153
-rw-r--r--fs/nfs/nfs42proc.c2
-rw-r--r--fs/nfs/nfs42xdr.c52
-rw-r--r--fs/nfs/nfs4_fs.h6
-rw-r--r--fs/nfs/nfs4client.c5
-rw-r--r--fs/nfs/nfs4idmap.c3
-rw-r--r--fs/nfs/nfs4proc.c109
-rw-r--r--fs/nfs/nfs4state.c16
-rw-r--r--fs/nfs/nfs4trace.h29
-rw-r--r--fs/nfs/nfs4xdr.c408
-rw-r--r--fs/nfs/pagelist.c23
-rw-r--r--fs/nfs/proc.c2
-rw-r--r--fs/nfs/super.c4
-rw-r--r--fs/nfs/unlink.c13
-rw-r--r--fs/nfs/write.c59
-rw-r--r--fs/nfsd/current_stateid.h36
-rw-r--r--fs/nfsd/nfs2acl.c116
-rw-r--r--fs/nfsd/nfs3acl.c75
-rw-r--r--fs/nfsd/nfs3proc.c301
-rw-r--r--fs/nfsd/nfs3xdr.c166
-rw-r--r--fs/nfsd/nfs4callback.c38
-rw-r--r--fs/nfsd/nfs4proc.c412
-rw-r--r--fs/nfsd/nfs4state.c142
-rw-r--r--fs/nfsd/nfs4xdr.c15
-rw-r--r--fs/nfsd/nfsd.h6
-rw-r--r--fs/nfsd/nfsfh.h24
-rw-r--r--fs/nfsd/nfsproc.c206
-rw-r--r--fs/nfsd/nfssvc.c24
-rw-r--r--fs/nfsd/nfsxdr.c92
-rw-r--r--fs/nfsd/xdr.h50
-rw-r--r--fs/nfsd/xdr3.h100
-rw-r--r--fs/nfsd/xdr4.h78
-rw-r--r--fs/nsfs.c3
-rw-r--r--fs/omfs/inode.c33
-rw-r--r--fs/orangefs/super.c15
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/proc/base.c45
-rw-r--r--fs/proc/internal.h8
-rw-r--r--fs/proc/proc_sysctl.c69
-rw-r--r--fs/pstore/inode.c14
-rw-r--r--fs/pstore/internal.h3
-rw-r--r--fs/pstore/platform.c2
-rw-r--r--fs/ramfs/inode.c32
-rw-r--r--fs/reiserfs/bitmap.c14
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/reiserfs/xattr_acl.c17
-rw-r--r--fs/super.c5
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/ubifs/crypto.c7
-rw-r--r--fs/ubifs/dir.c32
-rw-r--r--fs/ubifs/file.c27
-rw-r--r--fs/ubifs/journal.c36
-rw-r--r--fs/ubifs/key.h1
-rw-r--r--fs/ubifs/super.c11
-rw-r--r--fs/ubifs/tnc.c140
-rw-r--r--fs/ubifs/tnc_commit.c4
-rw-r--r--fs/ubifs/ubifs.h6
-rw-r--r--fs/ubifs/xattr.c39
-rw-r--r--fs/udf/file.c12
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/udf/super.c2
-rw-r--r--fs/udf/udftime.c98
-rw-r--r--fs/xfs/kmem.h10
-rw-r--r--fs/xfs/libxfs/xfs_attr.c4
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h2
-rw-r--r--fs/xfs/xfs_attr_list.c2
-rw-r--r--fs/xfs/xfs_dquot.c14
-rw-r--r--fs/xfs/xfs_symlink.c2
-rw-r--r--include/asm-generic/uaccess-unaligned.h26
-rw-r--r--include/drm/bridge/dw_hdmi.h70
-rw-r--r--include/dt-bindings/clock/boston-clock.h14
-rw-r--r--include/linux/backing-dev.h24
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bpf-cgroup.h2
-rw-r--r--include/linux/bpf_verifier.h1
-rw-r--r--include/linux/cdev.h2
-rw-r--r--include/linux/ceph/ceph_features.h8
-rw-r--r--include/linux/clk.h22
-rw-r--r--include/linux/compiler-gcc.h13
-rw-r--r--include/linux/compiler.h5
-rw-r--r--include/linux/cpu_cooling.h32
-rw-r--r--include/linux/cpufreq.h14
-rw-r--r--include/linux/crash_core.h7
-rw-r--r--include/linux/cred.h4
-rw-r--r--include/linux/dcache.h11
-rw-r--r--include/linux/eventpoll.h5
-rw-r--r--include/linux/flat.h2
-rw-r--r--include/linux/fs.h28
-rw-r--r--include/linux/fs_struct.h2
-rw-r--r--include/linux/fwnode.h4
-rw-r--r--include/linux/gfp.h56
-rw-r--r--include/linux/hugetlb.h3
-rw-r--r--include/linux/ipc.h5
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/jhash.h29
-rw-r--r--include/linux/kernel.h10
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/key-type.h4
-rw-r--r--include/linux/kmod.h2
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kvm_host.h17
-rw-r--r--include/linux/llist.h21
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockd/xdr.h26
-rw-r--r--include/linux/lockd/xdr4.h26
-rw-r--r--include/linux/lsm_hooks.h4
-rw-r--r--include/linux/migrate.h2
-rw-r--r--include/linux/mlx4/device.h10
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/msg.h2
-rw-r--r--include/linux/mtd/nand.h80
-rw-r--r--include/linux/mtd/partitions.h7
-rw-r--r--include/linux/mtd/spi-nor.h161
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netfilter.h9
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/nfs_page.h4
-rw-r--r--include/linux/nfs_xdr.h31
-rw-r--r--include/linux/nmi.h57
-rw-r--r--include/linux/ntb.h721
-rw-r--r--include/linux/nvmem-provider.h3
-rw-r--r--include/linux/once.h2
-rw-r--r--include/linux/path.h2
-rw-r--r--include/linux/pid_namespace.h2
-rw-r--r--include/linux/platform_data/usb-ohci-s3c2410.h2
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/random.h47
-rw-r--r--include/linux/rtc.h21
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/sem.h26
-rw-r--r--include/linux/shm.h2
-rw-r--r--include/linux/slab.h3
-rw-r--r--include/linux/string.h202
-rw-r--r--include/linux/sunrpc/clnt.h6
-rw-r--r--include/linux/sunrpc/sched.h2
-rw-r--r--include/linux/sunrpc/svc.h23
-rw-r--r--include/linux/sunrpc/svc_rdma.h46
-rw-r--r--include/linux/sunrpc/xdr.h15
-rw-r--r--include/linux/sysctl.h7
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/user_namespace.h2
-rw-r--r--include/linux/utsname.h2
-rw-r--r--include/linux/vfio.h2
-rw-r--r--include/net/9p/client.h13
-rw-r--r--include/net/9p/transport.h1
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netlink.h4
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sock.h2
-rw-r--r--include/rdma/ib_verbs.h18
-rw-r--r--include/rdma/rdma_vt.h5
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/target/iscsi/iscsi_target_core.h10
-rw-r--r--include/target/target_core_backend.h17
-rw-r--r--include/target/target_core_base.h11
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/uapi/linux/kcmp.h10
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/sem.h2
-rw-r--r--include/uapi/linux/target_core_user.h12
-rw-r--r--init/main.c1
-rw-r--r--ipc/msg.c28
-rw-r--r--ipc/sem.c155
-rw-r--r--ipc/shm.c26
-rw-r--r--ipc/util.c65
-rw-r--r--ipc/util.h23
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c1
-rw-r--r--kernel/bpf/inode.c16
-rw-r--r--kernel/bpf/verifier.c108
-rw-r--r--kernel/cpu.c3
-rw-r--r--kernel/crash_core.c44
-rw-r--r--kernel/fork.c22
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/irq/manage.c63
-rw-r--r--kernel/kcmp.c57
-rw-r--r--kernel/kexec.c8
-rw-r--r--kernel/kexec_core.c39
-rw-r--r--kernel/kexec_file.c15
-rw-r--r--kernel/kexec_internal.h2
-rw-r--r--kernel/kmod.c16
-rw-r--r--kernel/ksysfs.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c5
-rw-r--r--kernel/sysctl.c335
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace.c142
-rw-r--r--kernel/trace/trace_kprobe.c9
-rw-r--r--kernel/trace/trace_stack.c6
-rw-r--r--kernel/watchdog.c289
-rw-r--r--kernel/watchdog_hld.c37
-rw-r--r--lib/Kconfig.debug111
-rw-r--r--lib/Makefile2
-rw-r--r--lib/atomic64_test.c7
-rw-r--r--lib/fault-inject.c10
-rw-r--r--lib/percpu_counter.c7
-rw-r--r--lib/rhashtable.c2
-rw-r--r--lib/string.c7
-rw-r--r--lib/test_kmod.c1246
-rw-r--r--lib/test_sysctl.c148
-rw-r--r--mm/hugetlb.c4
-rw-r--r--mm/internal.h2
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/page_alloc.c14
-rw-r--r--mm/sparse-vmemmap.c4
-rw-r--r--mm/util.c38
-rw-r--r--mm/vmalloc.c2
-rw-r--r--mm/vmscan.c8
-rw-r--r--net/9p/client.c25
-rw-r--r--net/9p/trans_fd.c31
-rw-r--r--net/9p/trans_rdma.c31
-rw-r--r--net/bridge/br_device.c3
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/ceph/ceph_common.c6
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/osd_client.c5
-rw-r--r--net/ceph/osdmap.c31
-rw-r--r--net/compat.c49
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/dev_ioctl.c3
-rw-r--r--net/core/fib_rules.c3
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/ipv4/fib_frontend.c9
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c3
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/ipv4/syncookies.c1
-rw-r--r--net/ipv4/tcp_bbr.c49
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/output_core.c8
-rw-r--r--net/ipv6/syncookies.c1
-rw-r--r--net/netfilter/core.c147
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_nat_core.c17
-rw-r--r--net/netfilter/nfnetlink.c6
-rw-r--r--net/netfilter/x_tables.c12
-rw-r--r--net/openvswitch/conntrack.c51
-rw-r--r--net/packet/af_packet.c6
-rw-r--r--net/rds/send.c6
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/sch_fq.c2
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/socket.c31
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c3
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c9
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c14
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.h4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c8
-rw-r--r--net/sunrpc/clnt.c16
-rw-r--r--net/sunrpc/rpcb_clnt.c82
-rw-r--r--net/sunrpc/stats.c16
-rw-r--r--net/sunrpc/svc.c35
-rw-r--r--net/sunrpc/svc_xprt.c10
-rw-r--r--net/sunrpc/xprt.c8
-rw-r--r--net/sunrpc/xprtrdma/Makefile4
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c47
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c69
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c125
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c168
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c734
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c449
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c15
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c250
-rw-r--r--net/sunrpc/xprtrdma/transport.c3
-rw-r--r--net/sunrpc/xprtrdma/verbs.c55
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h40
-rw-r--r--samples/kfifo/dma-example.c8
-rw-r--r--scripts/Makefile.headersinst29
-rwxr-xr-xscripts/checkpatch.pl12
-rw-r--r--scripts/gdb/linux/constants.py.in7
-rw-r--r--scripts/gdb/linux/dmesg.py15
-rw-r--r--scripts/gdb/linux/proc.py73
-rw-r--r--security/Kconfig7
-rw-r--r--security/keys/compat_dh.c2
-rw-r--r--security/keys/dh.c5
-rw-r--r--security/keys/internal.h2
-rw-r--r--sound/core/pcm_native.c8
-rw-r--r--sound/drivers/opl4/opl4_lib.c2
-rw-r--r--sound/isa/msnd/msnd_midi.c30
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c23
-rw-r--r--sound/pci/hda/patch_hdmi.c5
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--tools/lib/bpf/bpf.c4
-rw-r--r--tools/lib/bpf/bpf.h2
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/testing/selftests/bpf/test_align.c2
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c480
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc36
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc28
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc4
-rw-r--r--tools/testing/selftests/kmod/Makefile11
-rw-r--r--tools/testing/selftests/kmod/config7
-rw-r--r--tools/testing/selftests/kmod/kmod.sh615
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh11
-rw-r--r--tools/testing/selftests/sysctl/Makefile3
-rw-r--r--tools/testing/selftests/sysctl/common_tests131
-rw-r--r--tools/testing/selftests/sysctl/config1
-rwxr-xr-xtools/testing/selftests/sysctl/run_numerictests10
-rwxr-xr-xtools/testing/selftests/sysctl/run_stringtests77
-rw-r--r--tools/testing/selftests/sysctl/sysctl.sh774
-rw-r--r--tools/testing/selftests/timers/Makefile2
-rw-r--r--tools/testing/selftests/timers/rtctest.c128
-rw-r--r--tools/testing/selftests/timers/rtctest_setdate.c86
-rw-r--r--virt/kvm/eventfd.c8
-rw-r--r--virt/kvm/irqchip.c2
-rw-r--r--virt/kvm/kvm_main.c131
-rw-r--r--virt/kvm/vfio.c40
1200 files changed, 38137 insertions, 18852 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index 3b5c3bca9186..f34e592301d1 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -229,6 +229,6 @@ KernelVersion: 4.1
Contact: linux-mtd@lists.infradead.org
Description:
For a partition, the offset of that partition from the start
- of the master device in bytes. This attribute is absent on
- main devices, so it can be used to distinguish between
- partitions and devices that aren't partitions.
+ of the parent (another partition or a flash device) in bytes.
+ This attribute is absent on flash devices, so it can be used
+ to distinguish them from partitions.
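As a concrete check of the rule above, here is a minimal userspace sketch in C;
the mtd0 path is an illustrative example, not part of this patch::

    /*
     * Probe whether an mtd node is a partition: partitions expose an
     * "offset" attribute in sysfs, flash devices do not.
     */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *attr = "/sys/class/mtd/mtd0/offset";  /* example node */

        if (access(attr, F_OK) == 0)
            printf("mtd0 is a partition\n");
        else
            printf("mtd0 is a flash device (no offset attribute)\n");
        return 0;
    }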
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 4ed388356898..f0cc3f772265 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -1,22 +1,24 @@
- Dynamic DMA mapping Guide
- =========================
+=========================
+Dynamic DMA mapping Guide
+=========================
- David S. Miller <davem@redhat.com>
- Richard Henderson <rth@cygnus.com>
- Jakub Jelinek <jakub@redhat.com>
+:Author: David S. Miller <davem@redhat.com>
+:Author: Richard Henderson <rth@cygnus.com>
+:Author: Jakub Jelinek <jakub@redhat.com>
This is a guide to device driver writers on how to use the DMA API
with example pseudo-code. For a concise description of the API, see
DMA-API.txt.
- CPU and DMA addresses
+CPU and DMA addresses
+=====================
There are several kinds of addresses involved in the DMA API, and it's
important to understand the differences.
The kernel normally uses virtual addresses. Any address returned by
kmalloc(), vmalloc(), and similar interfaces is a virtual address and can
-be stored in a "void *".
+be stored in a ``void *``.
The virtual memory system (TLB, page tables, etc.) translates virtual
addresses to CPU physical addresses, which are stored as "phys_addr_t" or
@@ -37,7 +39,7 @@ be restricted to a subset of that space. For example, even if a system
supports 64-bit addresses for main memory and PCI BARs, it may use an IOMMU
so devices only need to use 32-bit DMA addresses.
-Here's a picture and some examples:
+Here's a picture and some examples::
CPU CPU Bus
Virtual Physical Address
@@ -98,15 +100,16 @@ microprocessor architecture. You should use the DMA API rather than the
bus-specific DMA API, i.e., use the dma_map_*() interfaces rather than the
pci_map_*() interfaces.
-First of all, you should make sure
+First of all, you should make sure::
-#include <linux/dma-mapping.h>
+ #include <linux/dma-mapping.h>
is in your driver, which provides the definition of dma_addr_t. This type
can hold any valid DMA address for the platform and should be used
everywhere you hold a DMA address returned from the DMA mapping functions.
- What memory is DMA'able?
+What memory is DMA'able?
+========================
The first piece of information you must know is what kernel memory can
be used with the DMA mapping facilities. There has been an unwritten
@@ -143,7 +146,8 @@ What about block I/O and networking buffers? The block I/O and
networking subsystems make sure that the buffers they use are valid
for you to DMA from/to.
- DMA addressing limitations
+DMA addressing limitations
+==========================
Does your device have any DMA addressing limitations? For example, is
your device only capable of driving the low order 24-bits of address?
@@ -166,7 +170,7 @@ style to do this even if your device holds the default setting,
because this shows that you did think about these issues wrt. your
device.
-The query is performed via a call to dma_set_mask_and_coherent():
+The query is performed via a call to dma_set_mask_and_coherent()::
int dma_set_mask_and_coherent(struct device *dev, u64 mask);
@@ -175,12 +179,12 @@ If you have some special requirements, then the following two separate
queries can be used instead:
The query for streaming mappings is performed via a call to
- dma_set_mask():
+ dma_set_mask()::
int dma_set_mask(struct device *dev, u64 mask);
The query for consistent allocations is performed via a call
- to dma_set_coherent_mask():
+ to dma_set_coherent_mask()::
int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -209,7 +213,7 @@ of your driver reports that performance is bad or that the device is not
even detected, you can ask them for the kernel messages to find out
exactly why.
-The standard 32-bit addressing device would do something like this:
+The standard 32-bit addressing device would do something like this::
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
dev_warn(dev, "mydev: No suitable DMA available\n");
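A hedged sketch of how that mask query typically sits in a probe routine; the
64-bit-first fallback ordering, function name, and error code are illustrative,
not taken from this patch::

    #include <linux/dma-mapping.h>

    static int mydev_probe(struct device *dev)
    {
        /* Prefer 64-bit DMA, fall back to 32-bit, give up otherwise. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
            dev_warn(dev, "mydev: No suitable DMA available\n");
            return -ENODEV;
        }
        return 0;
    }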
@@ -225,7 +229,7 @@ than 64-bit addressing. For example, Sparc64 PCI SAC addressing is
more efficient than DAC addressing.
Here is how you would handle a 64-bit capable device which can drive
-all 64-bits when accessing streaming DMA:
+all 64-bits when accessing streaming DMA::
int using_dac;
@@ -239,7 +243,7 @@ all 64-bits when accessing streaming DMA:
}
If a card is capable of using 64-bit consistent allocations as well,
-the case would look like this:
+the case would look like this::
int using_dac, consistent_using_dac;
@@ -260,7 +264,7 @@ uses consistent allocations, one would have to check the return value from
dma_set_coherent_mask().
Finally, if your device can only drive the low 24-bits of
-address you might do something like:
+address you might do something like::
if (dma_set_mask(dev, DMA_BIT_MASK(24))) {
dev_warn(dev, "mydev: 24-bit DMA addressing not available\n");
@@ -280,7 +284,7 @@ only provide the functionality which the machine can handle. It
is important that the last call to dma_set_mask() be for the
most specific mask.
-Here is pseudo-code showing how this might be done:
+Here is pseudo-code showing how this might be done::
#define PLAYBACK_ADDRESS_BITS DMA_BIT_MASK(32)
#define RECORD_ADDRESS_BITS DMA_BIT_MASK(24)
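A minimal sketch of how that pattern might continue, assuming illustrative
card->playback_enabled/card->record_enabled flags that are not part of this
patch::

    /*
     * Less restrictive mask first, most restrictive mask last, so the
     * device is left configured with the 24-bit mask both paths accept.
     */
    if (dma_set_mask(dev, PLAYBACK_ADDRESS_BITS)) {
        card->playback_enabled = 0;
        dev_warn(dev, "mydev: playback DMA unavailable\n");
    }
    if (dma_set_mask(dev, RECORD_ADDRESS_BITS)) {
        card->record_enabled = 0;
        dev_warn(dev, "mydev: record DMA unavailable\n");
    }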
@@ -308,7 +312,8 @@ A sound card was used as an example here because this genre of PCI
devices seems to be littered with ISA chips given a PCI front end,
and thus retaining the 16MB DMA addressing limitations of ISA.
- Types of DMA mappings
+Types of DMA mappings
+=====================
There are two types of DMA mappings:
@@ -336,12 +341,14 @@ There are two types of DMA mappings:
to memory is immediately visible to the device, and vice
versa. Consistent mappings guarantee this.
- IMPORTANT: Consistent DMA memory does not preclude the usage of
- proper memory barriers. The CPU may reorder stores to
+ .. important::
+
+ Consistent DMA memory does not preclude the usage of
+ proper memory barriers. The CPU may reorder stores to
consistent memory just as it may normal memory. Example:
if it is important for the device to see the first word
of a descriptor updated before the second, you must do
- something like:
+ something like::
desc->word0 = address;
wmb();
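A slightly fuller sketch of that ordering rule, with an illustrative two-word
descriptor layout and valid bit (the names are not from this patch)::

    #define DESC_VALID 1  /* illustrative ownership/valid flag */

    struct my_desc {
        u32 word0;  /* buffer DMA address, read first by the device */
        u32 word1;  /* valid flag, read second */
    };

    static void publish_desc(struct my_desc *desc, u32 address)
    {
        desc->word0 = address;
        /*
         * Order the address store before the valid-bit store so the
         * device never sees a valid descriptor with a stale address.
         */
        wmb();
        desc->word1 = DESC_VALID;
    }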
@@ -377,16 +384,17 @@ Also, systems with caches that aren't DMA-coherent will work better
when the underlying buffers don't share cache lines with other data.
- Using Consistent DMA mappings.
+Using Consistent DMA mappings
+=============================
To allocate and map large (PAGE_SIZE or so) consistent DMA regions,
-you should do:
+you should do::
dma_addr_t dma_handle;
cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, gfp);
-where device is a struct device *. This may be called in interrupt
+where dev is a ``struct device *``. This may be called in interrupt
context with the GFP_ATOMIC flag.
Size is the length of the region you want to allocate, in bytes.
@@ -415,7 +423,7 @@ exists (for example) to guarantee that if you allocate a chunk
which is smaller than or equal to 64 kilobytes, the extent of the
buffer you receive will not cross a 64K boundary.
-To unmap and free such a DMA region, you call:
+To unmap and free such a DMA region, you call::
dma_free_coherent(dev, size, cpu_addr, dma_handle);
@@ -430,7 +438,7 @@ a kmem_cache, but it uses dma_alloc_coherent(), not __get_free_pages().
Also, it understands common hardware constraints for alignment,
like queue heads needing to be aligned on N byte boundaries.
-Create a dma_pool like this:
+Create a dma_pool like this::
struct dma_pool *pool;
@@ -444,7 +452,7 @@ pass 0 for boundary; passing 4096 says memory allocated from this pool
must not cross 4KByte boundaries (but at that time it may be better to
use dma_alloc_coherent() directly instead).
-Allocate memory from a DMA pool like this:
+Allocate memory from a DMA pool like this::
cpu_addr = dma_pool_alloc(pool, flags, &dma_handle);
@@ -452,7 +460,7 @@ flags are GFP_KERNEL if blocking is permitted (not in_interrupt nor
holding SMP locks), GFP_ATOMIC otherwise. Like dma_alloc_coherent(),
this returns two values, cpu_addr and dma_handle.
-Free memory that was allocated from a dma_pool like this:
+Free memory that was allocated from a dma_pool like this::
dma_pool_free(pool, cpu_addr, dma_handle);
@@ -460,7 +468,7 @@ where pool is what you passed to dma_pool_alloc(), and cpu_addr and
dma_handle are the values dma_pool_alloc() returned. This function
may be called in interrupt context.
-Destroy a dma_pool by calling:
+Destroy a dma_pool by calling::
dma_pool_destroy(pool);
@@ -468,11 +476,12 @@ Make sure you've called dma_pool_free() for all memory allocated
from a pool before you destroy the pool. This function may not
be called in interrupt context.
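+
+Putting these calls together, a minimal pool lifecycle might look like
+this (an illustrative sketch; the pool name and the 64/8 byte sizes
+are made up)::
+
+	struct dma_pool *pool;
+	dma_addr_t dma_handle;
+	void *cpu_addr;
+
+	/* 64-byte blocks, 8-byte aligned, no boundary restriction */
+	pool = dma_pool_create("mydesc", dev, 64, 8, 0);
+	if (!pool)
+		return -ENOMEM;
+
+	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
+	if (cpu_addr) {
+		/* ... hand dma_handle to the device ... */
+		dma_pool_free(pool, cpu_addr, dma_handle);
+	}
+	dma_pool_destroy(pool);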
- DMA Direction
+DMA Direction
+=============
The interfaces described in subsequent portions of this document
take a DMA direction argument, which is an integer and takes on
-one of the following values:
+one of the following values::
DMA_BIDIRECTIONAL
DMA_TO_DEVICE
@@ -521,14 +530,15 @@ packets, map/unmap them with the DMA_TO_DEVICE direction
specifier. For receive packets, just the opposite, map/unmap them
with the DMA_FROM_DEVICE direction specifier.
- Using Streaming DMA mappings
+Using Streaming DMA mappings
+============================
The streaming DMA mapping routines can be called from interrupt
context. There are two versions of each map/unmap, one which will
map/unmap a single memory region, and one which will map/unmap a
scatterlist.
-To map a single region, you do:
+To map a single region, you do::
struct device *dev = &my_dev->dev;
dma_addr_t dma_handle;
@@ -545,7 +555,7 @@ To map a single region, you do:
goto map_error_handling;
}
-and to unmap it:
+and to unmap it::
dma_unmap_single(dev, dma_handle, size, direction);
@@ -563,7 +573,7 @@ Using CPU pointers like this for single mappings has a disadvantage:
you cannot reference HIGHMEM memory in this way. Thus, there is a
map/unmap interface pair akin to dma_{map,unmap}_single(). These
interfaces deal with page/offset pairs instead of CPU pointers.
-Specifically:
+Specifically::
struct device *dev = &my_dev->dev;
dma_addr_t dma_handle;
@@ -593,7 +603,7 @@ error as outlined under the dma_map_single() discussion.
You should call dma_unmap_page() when the DMA activity is finished, e.g.,
from the interrupt which told you that the DMA transfer is done.
-With scatterlists, you map a region gathered from several regions by:
+With scatterlists, you map a region gathered from several regions by::
int i, count = dma_map_sg(dev, sglist, nents, direction);
struct scatterlist *sg;
@@ -617,16 +627,18 @@ Then you should loop count times (note: this can be less than nents times)
and use sg_dma_address() and sg_dma_len() macros where you previously
accessed sg->address and sg->length as shown above.
-To unmap a scatterlist, just call:
+To unmap a scatterlist, just call::
dma_unmap_sg(dev, sglist, nents, direction);
Again, make sure DMA activity has already finished.
-PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be
- the _same_ one you passed into the dma_map_sg call,
- it should _NOT_ be the 'count' value _returned_ from the
- dma_map_sg call.
+.. note::
+
+ The 'nents' argument to the dma_unmap_sg call must be
+ the _same_ one you passed into the dma_map_sg call,
+ it should _NOT_ be the 'count' value _returned_ from the
+ dma_map_sg call.
Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}()
counterpart, because the DMA address space is a shared resource and
@@ -638,11 +650,11 @@ properly in order for the CPU and device to see the most up-to-date and
correct copy of the DMA buffer.
So, firstly, just map it with dma_map_{single,sg}(), and after each DMA
-transfer call either:
+transfer call either::
dma_sync_single_for_cpu(dev, dma_handle, size, direction);
-or:
+or::
dma_sync_sg_for_cpu(dev, sglist, nents, direction);
@@ -650,17 +662,19 @@ as appropriate.
Then, if you wish to let the device get at the DMA area again,
finish accessing the data with the CPU, and then before actually
-giving the buffer to the hardware call either:
+giving the buffer to the hardware call either::
dma_sync_single_for_device(dev, dma_handle, size, direction);
-or:
+or::
dma_sync_sg_for_device(dev, sglist, nents, direction);
as appropriate.
-PLEASE NOTE: The 'nents' argument to dma_sync_sg_for_cpu() and
+.. note::
+
+ The 'nents' argument to dma_sync_sg_for_cpu() and
dma_sync_sg_for_device() must be the same passed to
dma_map_sg(). It is _NOT_ the count returned by
dma_map_sg().
@@ -671,7 +685,7 @@ dma_map_*() call till dma_unmap_*(), then you don't have to call the
dma_sync_*() routines at all.
Here is pseudo code which shows a situation in which you would need
-to use the dma_sync_*() interfaces.
+to use the dma_sync_*() interfaces::
my_card_setup_receive_buffer(struct my_card *cp, char *buffer, int len)
{
@@ -747,7 +761,8 @@ is planned to completely remove virt_to_bus() and bus_to_virt() as
they are entirely deprecated. Some ports already do not provide these
as it is impossible to correctly support them.
- Handling Errors
+Handling Errors
+===============
DMA address space is limited on some architectures and an allocation
failure can be determined by:
@@ -755,7 +770,7 @@ failure can be determined by:
- checking if dma_alloc_coherent() returns NULL or dma_map_sg() returns 0
- checking the dma_addr_t returned from dma_map_single() and dma_map_page()
- by using dma_mapping_error():
+ by using dma_mapping_error()::
dma_addr_t dma_handle;
@@ -773,7 +788,8 @@ failure can be determined by:
of a multiple-page mapping attempt. These examples are applicable to
dma_map_page() as well.
-Example 1:
+Example 1::
+
dma_addr_t dma_handle1;
dma_addr_t dma_handle2;
@@ -802,8 +818,12 @@ Example 1:
dma_unmap_single(dev, dma_handle1, size, direction);
map_error_handling1:
-Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
- mapping error is detected in the middle)
+Example 2::
+
+ /*
+ * if buffers are allocated in a loop, unmap all mapped buffers when
+ * mapping error is detected in the middle
+ */
dma_addr_t dma_addr;
dma_addr_t array[DMA_BUFFERS];
@@ -846,7 +866,8 @@ SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping
fails in the queuecommand hook. This means that the SCSI subsystem
passes the command to the driver again later.
- Optimizing Unmap State Space Consumption
+Optimizing Unmap State Space Consumption
+========================================
On many platforms, dma_unmap_{single,page}() is simply a nop.
Therefore, keeping track of the mapping address and length is a waste
@@ -858,7 +879,7 @@ Actually, instead of describing the macros one by one, we'll
transform some example code.
1) Use DEFINE_DMA_UNMAP_{ADDR,LEN} in state saving structures.
- Example, before:
+ Example, before::
struct ring_state {
struct sk_buff *skb;
@@ -866,7 +887,7 @@ transform some example code.
__u32 len;
};
- after:
+ after::
struct ring_state {
struct sk_buff *skb;
@@ -875,23 +896,23 @@ transform some example code.
};
2) Use dma_unmap_{addr,len}_set() to set these values.
- Example, before:
+ Example, before::
ringp->mapping = FOO;
ringp->len = BAR;
- after:
+ after::
dma_unmap_addr_set(ringp, mapping, FOO);
dma_unmap_len_set(ringp, len, BAR);
3) Use dma_unmap_{addr,len}() to access these values.
- Example, before:
+ Example, before::
dma_unmap_single(dev, ringp->mapping, ringp->len,
DMA_FROM_DEVICE);
- after:
+ after::
dma_unmap_single(dev,
dma_unmap_addr(ringp, mapping),
@@ -902,7 +923,8 @@ It really should be self-explanatory. We treat the ADDR and LEN
separately, because it is possible for an implementation to only
need the address in order to perform the unmap operation.
- Platform Issues
+Platform Issues
+===============
If you are just writing drivers for Linux and do not maintain
an architecture port for the kernel, you can safely skip down
@@ -928,12 +950,13 @@ to "Closing".
alignment constraints (e.g. the alignment constraints about 64-bit
objects).
- Closing
+Closing
+=======
This document, and the API itself, would not be in its current
form without the feedback and suggestions from numerous individuals.
We would like to specifically mention, in no particular order, the
-following people:
+following people::
Russell King <rmk@arm.linux.org.uk>
Leo Dagum <dagum@barrel.engr.sgi.com>
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 71200dfa0922..45b29326d719 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -1,7 +1,8 @@
- Dynamic DMA mapping using the generic device
- ============================================
+============================================
+Dynamic DMA mapping using the generic device
+============================================
- James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
+:Author: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
This document describes the DMA API. For a more gentle introduction
of the API (and actual examples), see Documentation/DMA-API-HOWTO.txt.
@@ -12,10 +13,10 @@ machines. Unless you know that your driver absolutely has to support
non-consistent platforms (this is usually only legacy platforms) you
should only use the API described in part I.
-Part I - dma_ API
--------------------------------------
+Part I - dma_API
+----------------
-To get the dma_ API, you must #include <linux/dma-mapping.h>. This
+To get the dma_API, you must #include <linux/dma-mapping.h>. This
provides dma_addr_t and the interfaces described below.
A dma_addr_t can hold any valid DMA address for the platform. It can be
@@ -26,9 +27,11 @@ address space and the DMA address space.
Part Ia - Using large DMA-coherent buffers
------------------------------------------
-void *
-dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+::
+
+ void *
+ dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
Consistent memory is memory for which a write by either the device or
the processor can immediately be read by the processor or device
@@ -51,20 +54,24 @@ consolidate your requests for consistent memory as much as possible.
The simplest way to do that is to use the dma_pool calls (see below).
The flag parameter (dma_alloc_coherent() only) allows the caller to
-specify the GFP_ flags (see kmalloc()) for the allocation (the
+specify the ``GFP_`` flags (see kmalloc()) for the allocation (the
implementation may choose to ignore flags that affect the location of
the returned memory, like GFP_DMA).
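+
+For example, a sketch of a page-sized allocation at probe time (the
+size is arbitrary; dma_free_coherent() is described below)::
+
+	dma_addr_t dma_handle;
+	void *cpu_addr;
+
+	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle,
+				      GFP_KERNEL);
+	if (!cpu_addr)
+		return -ENOMEM;
+	/* the CPU uses cpu_addr, the device is given dma_handle */
+	...
+	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);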
-void *
-dma_zalloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+::
+
+ void *
+ dma_zalloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
Wraps dma_alloc_coherent() and also zeroes the returned memory if the
allocation attempt succeeded.
-void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+::
+
+ void
+ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
Free a region of consistent memory you previously allocated. dev,
size and dma_handle must all be the same as those passed into
@@ -78,7 +85,7 @@ may only be called with IRQs enabled.
Part Ib - Using small DMA-coherent buffers
------------------------------------------
-To get this part of the dma_ API, you must #include <linux/dmapool.h>
+To get this part of the dma_API, you must #include <linux/dmapool.h>
Many drivers need lots of small DMA-coherent memory regions for DMA
descriptors or I/O buffers. Rather than allocating in units of a page
@@ -88,6 +95,8 @@ not __get_free_pages(). Also, they understand common hardware constraints
for alignment, like queue heads needing to be aligned on N-byte boundaries.
+::
+
struct dma_pool *
dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t alloc);
@@ -103,16 +112,21 @@ in bytes, and must be a power of two). If your device has no boundary
crossing restrictions, pass 0 for alloc; passing 4096 says memory allocated
from this pool must not cross 4KByte boundaries.
+::
- void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
+ void *
+ dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
Wraps dma_pool_alloc() and also zeroes the returned memory if the
allocation attempt succeeded.
- void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
- dma_addr_t *dma_handle);
+::
+
+ void *
+ dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
+ dma_addr_t *dma_handle);
This allocates memory from the pool; the returned memory will meet the
size and alignment requirements specified at creation time. Pass
@@ -122,16 +136,20 @@ blocking. Like dma_alloc_coherent(), this returns two values: an
address usable by the CPU, and the DMA address usable by the pool's
device.
+::
- void dma_pool_free(struct dma_pool *pool, void *vaddr,
- dma_addr_t addr);
+ void
+ dma_pool_free(struct dma_pool *pool, void *vaddr,
+ dma_addr_t addr);
This puts memory back into the pool. The pool is what was passed to
dma_pool_alloc(); the CPU (vaddr) and DMA addresses are what
were returned when that routine allocated the memory being freed.
+::
- void dma_pool_destroy(struct dma_pool *pool);
+ void
+ dma_pool_destroy(struct dma_pool *pool);
dma_pool_destroy() frees the resources of the pool. It must be
called in a context which can sleep. Make sure you've freed all allocated
@@ -141,32 +159,40 @@ memory back to the pool before you destroy it.
Part Ic - DMA addressing limitations
------------------------------------
-int
-dma_set_mask_and_coherent(struct device *dev, u64 mask)
+::
+
+ int
+ dma_set_mask_and_coherent(struct device *dev, u64 mask)
Checks to see if the mask is possible and updates the device
streaming and coherent DMA mask parameters if it is.
Returns: 0 if successful and a negative error if not.
-int
-dma_set_mask(struct device *dev, u64 mask)
+::
+
+ int
+ dma_set_mask(struct device *dev, u64 mask)
Checks to see if the mask is possible and updates the device
parameters if it is.
Returns: 0 if successful and a negative error if not.
-int
-dma_set_coherent_mask(struct device *dev, u64 mask)
+::
+
+ int
+ dma_set_coherent_mask(struct device *dev, u64 mask)
Checks to see if the mask is possible and updates the device
parameters if it is.
Returns: 0 if successful and a negative error if not.
-u64
-dma_get_required_mask(struct device *dev)
+::
+
+ u64
+ dma_get_required_mask(struct device *dev)
This API returns the mask that the platform requires to
operate efficiently. Usually this means the returned mask
@@ -182,94 +208,107 @@ call to set the mask to the value returned.
Part Id - Streaming DMA mappings
--------------------------------
-dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
+::
+
+ dma_addr_t
+ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
Maps a piece of processor virtual memory so it can be accessed by the
device and returns the DMA address of the memory.
The direction for both APIs may be converted freely by casting.
-However the dma_ API uses a strongly typed enumerator for its
+However the dma_API uses a strongly typed enumerator for its
direction:
+======================= =============================================
DMA_NONE no direction (used for debugging)
DMA_TO_DEVICE data is going from the memory to the device
DMA_FROM_DEVICE data is coming from the device to the memory
DMA_BIDIRECTIONAL direction isn't known
+======================= =============================================
+
+.. note::
+
+ Not all memory regions in a machine can be mapped by this API.
+ Further, contiguous kernel virtual space may not be contiguous in
+ physical memory. Since this API does not provide any scatter/gather
+ capability, it will fail if the user tries to map a non-physically
+ contiguous piece of memory. For this reason, memory to be mapped by
+ this API should be obtained from sources which guarantee it to be
+ physically contiguous (like kmalloc).
+
+ Further, the DMA address of the memory must be within the
+ dma_mask of the device (the dma_mask is a bit mask of the
+ addressable region for the device, i.e., if the DMA address of
+ the memory ANDed with the dma_mask is still equal to the DMA
+ address, then the device can perform DMA to the memory). To
+ ensure that the memory allocated by kmalloc is within the dma_mask,
+ the driver may specify various platform-dependent flags to restrict
+ the DMA address range of the allocation (e.g., on x86, GFP_DMA
+ guarantees to be within the first 16MB of available DMA addresses,
+ as required by ISA devices).
+
+ Note also that the above constraints on physical contiguity and
+ dma_mask may not apply if the platform has an IOMMU (a device which
+ maps an I/O DMA address to a physical memory address). However, to be
+ portable, device driver writers may *not* assume that such an IOMMU
+ exists.
+
+.. warning::
+
+ Memory coherency operates at a granularity called the cache
+ line width. In order for memory mapped by this API to operate
+ correctly, the mapped region must begin exactly on a cache line
+ boundary and end exactly on one (to prevent two separately mapped
+ regions from sharing a single cache line). Since the cache line size
+ may not be known at compile time, the API will not enforce this
+ requirement. Therefore, it is recommended that driver writers who
+ don't take special care to determine the cache line size at run time
+ only map virtual regions that begin and end on page boundaries (which
+ are guaranteed also to be cache line boundaries).
+
+ DMA_TO_DEVICE synchronisation must be done after the last modification
+ of the memory region by the software and before it is handed off to
+ the device. Once this primitive is used, memory covered by this
+ primitive should be treated as read-only by the device. If the device
+ may write to it at any point, it should be DMA_BIDIRECTIONAL (see
+ below).
+
+ DMA_FROM_DEVICE synchronisation must be done before the driver
+ accesses data that may be changed by the device. This memory should
+ be treated as read-only by the driver. If the driver needs to write
+ to it at any point, it should be DMA_BIDIRECTIONAL (see below).
+
+ DMA_BIDIRECTIONAL requires special handling: it means that the driver
+ isn't sure if the memory was modified before being handed off to the
+ device and also isn't sure if the device will modify it. Thus,
+ you must always sync bidirectional memory twice: once before the
+ memory is handed off to the device (to make sure all memory changes
+ are flushed from the processor) and once before the data may be
+ accessed after being used by the device (to make sure any processor
+ cache lines are updated with data that the device may have changed).
+
+::
-Notes: Not all memory regions in a machine can be mapped by this API.
-Further, contiguous kernel virtual space may not be contiguous as
-physical memory. Since this API does not provide any scatter/gather
-capability, it will fail if the user tries to map a non-physically
-contiguous piece of memory. For this reason, memory to be mapped by
-this API should be obtained from sources which guarantee it to be
-physically contiguous (like kmalloc).
-
-Further, the DMA address of the memory must be within the
-dma_mask of the device (the dma_mask is a bit mask of the
-addressable region for the device, i.e., if the DMA address of
-the memory ANDed with the dma_mask is still equal to the DMA
-address, then the device can perform DMA to the memory). To
-ensure that the memory allocated by kmalloc is within the dma_mask,
-the driver may specify various platform-dependent flags to restrict
-the DMA address range of the allocation (e.g., on x86, GFP_DMA
-guarantees to be within the first 16MB of available DMA addresses,
-as required by ISA devices).
-
-Note also that the above constraints on physical contiguity and
-dma_mask may not apply if the platform has an IOMMU (a device which
-maps an I/O DMA address to a physical memory address). However, to be
-portable, device driver writers may *not* assume that such an IOMMU
-exists.
-
-Warnings: Memory coherency operates at a granularity called the cache
-line width. In order for memory mapped by this API to operate
-correctly, the mapped region must begin exactly on a cache line
-boundary and end exactly on one (to prevent two separately mapped
-regions from sharing a single cache line). Since the cache line size
-may not be known at compile time, the API will not enforce this
-requirement. Therefore, it is recommended that driver writers who
-don't take special care to determine the cache line size at run time
-only map virtual regions that begin and end on page boundaries (which
-are guaranteed also to be cache line boundaries).
-
-DMA_TO_DEVICE synchronisation must be done after the last modification
-of the memory region by the software and before it is handed off to
-the device. Once this primitive is used, memory covered by this
-primitive should be treated as read-only by the device. If the device
-may write to it at any point, it should be DMA_BIDIRECTIONAL (see
-below).
-
-DMA_FROM_DEVICE synchronisation must be done before the driver
-accesses data that may be changed by the device. This memory should
-be treated as read-only by the driver. If the driver needs to write
-to it at any point, it should be DMA_BIDIRECTIONAL (see below).
-
-DMA_BIDIRECTIONAL requires special handling: it means that the driver
-isn't sure if the memory was modified before being handed off to the
-device and also isn't sure if the device will also modify it. Thus,
-you must always sync bidirectional memory twice: once before the
-memory is handed off to the device (to make sure all memory changes
-are flushed from the processor) and once before the data may be
-accessed after being used by the device (to make sure any processor
-cache lines are updated with data that the device may have changed).
-
-void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+ void
+ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
Unmaps the region previously mapped. All the parameters must be
identical to those passed to (and returned by) the mapping API.
-dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+::
+
+ dma_addr_t
+ dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+
+ void
+ dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
API for mapping and unmapping for pages. All the notes and warnings
for the other mapping APIs apply here. Also, although the <offset>
@@ -277,20 +316,24 @@ and <size> parameters are provided to do partial page mapping, it is
recommended that you never use these unless you really know what the
cache width is.
-dma_addr_t
-dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
+::
-void
-dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
+ dma_addr_t
+ dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+
+ void
+ dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
API for mapping and unmapping for MMIO resources. All the notes and
warnings for the other mapping APIs apply here. The API should only be
used to map device MMIO resources; mapping of RAM is not permitted.
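+
+For instance, a driver setting up a device-to-device transfer might
+map a peripheral's FIFO window like this (a sketch; ``fifo_phys`` is
+an assumed MMIO physical address, and the error check uses
+dma_mapping_error(), described next)::
+
+	dma_addr_t dma;
+
+	dma = dma_map_resource(dev, fifo_phys, SZ_4K, DMA_FROM_DEVICE, 0);
+	if (dma_mapping_error(dev, dma))
+		return -EIO;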
-int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+::
+
+ int
+ dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
In some circumstances dma_map_single(), dma_map_page() and dma_map_resource()
will fail to create a mapping. A driver can check for these errors by testing
@@ -298,9 +341,11 @@ the returned DMA address with dma_mapping_error(). A non-zero return value
means the mapping could not be created and the driver should take appropriate
action (e.g. reduce current DMA mapping usage or delay and try again later).
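+
+A typical check, sketched with the parameter names used in the
+dma_map_single() description above::
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_single(dev, cpu_addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/* reduce current DMA mapping usage or delay and retry */
+		goto map_error_handling;
+	}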
+::
+
int
dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+ int nents, enum dma_data_direction direction)
Returns: the number of DMA address segments mapped (this may be shorter
than <nents> passed in if some elements of the scatter/gather list are
@@ -316,7 +361,7 @@ critical that the driver do something, in the case of a block driver
aborting the request or even oopsing is better than doing nothing and
corrupting the filesystem.
-With scatterlists, you use the resulting mapping like this:
+With scatterlists, you use the resulting mapping like this::
int i, count = dma_map_sg(dev, sglist, nents, direction);
struct scatterlist *sg;
@@ -337,9 +382,11 @@ Then you should loop count times (note: this can be less than nents times)
and use sg_dma_address() and sg_dma_len() macros where you previously
accessed sg->address and sg->length as shown above.
+::
+
void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+ int nents, enum dma_data_direction direction)
Unmap the previously mapped scatter/gather list. All the parameters
must be the same as those passed in to the scatter/gather mapping
@@ -348,18 +395,27 @@ API.
Note: <nents> must be the number you passed in, *not* the number of
DMA address entries returned.
-void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+::
+
+ void
+ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+
+ void
+ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+
+ void
+ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents,
+ enum dma_data_direction direction)
+
+ void
+ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents,
+ enum dma_data_direction direction)
Synchronise a single contiguous or scatter/gather mapping for the CPU
and device. With the sync_sg API, all the parameters must be the same
@@ -367,36 +423,41 @@ as those passed into the single mapping API. With the sync_single API,
you can use dma_handle and size parameters that aren't identical to
those passed into the single mapping API to do a partial sync.
-Notes: You must do this:
-- Before reading values that have been written by DMA from the device
- (use the DMA_FROM_DEVICE direction)
-- After writing values that will be written to the device using DMA
- (use the DMA_TO_DEVICE) direction
-- before *and* after handing memory to the device if the memory is
- DMA_BIDIRECTIONAL
+.. note::
+
+ You must do this:
+
+ - Before reading values that have been written by DMA from the device
+ (use the DMA_FROM_DEVICE direction)
+ - After writing values that will be written to the device using DMA
+ (use the DMA_TO_DEVICE direction)
+ - Before *and* after handing memory to the device if the memory is
+ DMA_BIDIRECTIONAL
See also dma_map_single().
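+
+For example, to pick up only the first 64 bytes that the device wrote
+into a larger DMA_FROM_DEVICE mapping, a partial sync might look like
+this (a sketch)::
+
+	dma_sync_single_for_cpu(dev, dma_handle, 64, DMA_FROM_DEVICE);
+	/* the CPU may now safely read the first 64 bytes */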
-dma_addr_t
-dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+::
+
+ dma_addr_t
+ dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
-void
-dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+ void
+ dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
-int
-dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+ int
+ dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
-void
-dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+ void
+ dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
The four functions above are just like the counterpart functions
without the _attrs suffixes, except that they pass an optional
@@ -410,37 +471,38 @@ is identical to those of the corresponding function
without the _attrs suffix. As a result dma_map_single_attrs()
can generally replace dma_map_single(), etc.
-As an example of the use of the *_attrs functions, here's how
+As an example of the use of the ``*_attrs`` functions, here's how
you could pass an attribute DMA_ATTR_FOO when mapping memory
-for DMA:
+for DMA::
-#include <linux/dma-mapping.h>
-/* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and
- * documented in Documentation/DMA-attributes.txt */
-...
+ #include <linux/dma-mapping.h>
+ /* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and
+ * documented in Documentation/DMA-attributes.txt */
+ ...
- unsigned long attr;
- attr |= DMA_ATTR_FOO;
- ....
- n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr);
- ....
+ unsigned long attr = 0;
+ attr |= DMA_ATTR_FOO;
+ ....
+ n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr);
+ ....
Architectures that care about DMA_ATTR_FOO would check for its
presence in their implementations of the mapping and unmapping
-routines, e.g.:
-
-void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- ....
- if (attrs & DMA_ATTR_FOO)
- /* twizzle the frobnozzle */
- ....
+routines, e.g.::
+
+ void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+ {
+ ....
+ if (attrs & DMA_ATTR_FOO)
+ /* twizzle the frobnozzle */
+ ....
+ }
-Part II - Advanced dma_ usage
------------------------------
+Part II - Advanced dma_API usage
+--------------------------------
Warning: These pieces of the DMA API should not be used in the
majority of cases, since they cater for unlikely corner cases that
@@ -450,9 +512,11 @@ If you don't understand how cache line coherency works between a
processor and an I/O device, you should not be using this part of the
API at all.
-void *
-dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+::
+
+ void *
+ dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
Identical to dma_alloc_coherent() except that the platform will
choose to return either consistent or non-consistent memory as it sees
@@ -468,39 +532,49 @@ only use this API if you positively know your driver will be
required to work on one of the rare (usually non-PCI) architectures
that simply cannot make consistent memory.
-void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+::
+
+ void
+ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
Free memory allocated by the nonconsistent API. All parameters must
be identical to those passed in (and returned by
dma_alloc_noncoherent()).
-int
-dma_get_cache_alignment(void)
+::
+
+ int
+ dma_get_cache_alignment(void)
Returns the processor cache alignment. This is the absolute minimum
alignment *and* width that you must observe when either mapping
memory or doing partial flushes.
-Notes: This API may return a number *larger* than the actual cache
-line, but it will guarantee that one or more cache lines fit exactly
-into the width returned by this call. It will also always be a power
-of two for easy alignment.
+.. note::
-void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
+ This API may return a number *larger* than the actual cache
+ line, but it will guarantee that one or more cache lines fit exactly
+ into the width returned by this call. It will also always be a power
+ of two for easy alignment.
+
+::
+
+ void
+ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
Do a partial sync of memory that was allocated by
dma_alloc_noncoherent(), starting at virtual address vaddr and
continuing on for size. Again, you *must* observe the cache line
boundaries when doing this.
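+
+As an illustration (``vaddr`` and ``nbytes`` are assumed to come from
+an earlier dma_alloc_noncoherent() allocation)::
+
+	size_t len = ALIGN(nbytes, dma_get_cache_alignment());
+
+	/* make the CPU's writes visible before the device reads them */
+	dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);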
-int
-dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int
- flags)
+::
+
+ int
+ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int
+ flags)
Declare region of memory to be handed out by dma_alloc_coherent() when
it's asked for coherent memory for this device.
@@ -516,21 +590,21 @@ size is the size of the area (must be multiples of PAGE_SIZE).
flags can be ORed together and are:
-DMA_MEMORY_MAP - request that the memory returned from
-dma_alloc_coherent() be directly writable.
+- DMA_MEMORY_MAP - request that the memory returned from
+ dma_alloc_coherent() be directly writable.
-DMA_MEMORY_IO - request that the memory returned from
-dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc.
+- DMA_MEMORY_IO - request that the memory returned from
+ dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc.
One or both of these flags must be present.
-DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by
-dma_alloc_coherent of any child devices of this one (for memory residing
-on a bridge).
+- DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by
+ dma_alloc_coherent of any child devices of this one (for memory residing
+ on a bridge).
-DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
-Do not allow dma_alloc_coherent() to fall back to system memory when
-it's out of memory in the declared region.
+- DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
+ Do not allow dma_alloc_coherent() to fall back to system memory when
+ it's out of memory in the declared region.
The return value will be either DMA_MEMORY_MAP or DMA_MEMORY_IO and
must correspond to a passed in flag (i.e. no returning DMA_MEMORY_IO
@@ -543,15 +617,17 @@ must be accessed using the correct bus functions. If your driver
isn't prepared to handle this contingency, it should not specify
DMA_MEMORY_IO in the input flags.
-As a simplification for the platforms, only *one* such region of
+As a simplification for the platforms, only **one** such region of
memory may be declared per device.
For reasons of efficiency, most platforms choose to track the declared
region only at the granularity of a page. For smaller allocations,
you should use the dma_pool() API.
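+
+A sketch of declaring a hypothetical 64KB block of on-card memory
+(``phys_addr`` and ``device_addr`` are assumed to describe that
+block)::
+
+	int ret;
+
+	ret = dma_declare_coherent_memory(dev, phys_addr, device_addr,
+					  SZ_64K, DMA_MEMORY_MAP);
+	if (ret != DMA_MEMORY_MAP)
+		dev_err(dev, "coherent memory declaration failed\n");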
-void
-dma_release_declared_memory(struct device *dev)
+::
+
+ void
+ dma_release_declared_memory(struct device *dev)
Remove the memory region previously declared from the system. This
API performs *no* in-use checking for this region and will return
@@ -559,9 +635,11 @@ unconditionally having removed all the required structures. It is the
driver's job to ensure that no parts of this memory region are
currently in use.
-void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
+::
+
+ void *
+ dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
This is used to occupy specific regions of the declared space
(dma_alloc_coherent() will hand out the first free region it finds).
@@ -592,38 +670,37 @@ option has a performance impact. Do not enable it in production kernels.
If you boot the resulting kernel, it will contain code which does some bookkeeping
about what DMA memory was allocated for which device. If this code detects an
error it prints a warning message with some details into your kernel log. An
-example warning message may look like this:
-
-------------[ cut here ]------------
-WARNING: at /data2/repos/linux-2.6-iommu/lib/dma-debug.c:448
- check_unmap+0x203/0x490()
-Hardware name:
-forcedeth 0000:00:08.0: DMA-API: device driver frees DMA memory with wrong
- function [device address=0x00000000640444be] [size=66 bytes] [mapped as
-single] [unmapped as page]
-Modules linked in: nfsd exportfs bridge stp llc r8169
-Pid: 0, comm: swapper Tainted: G W 2.6.28-dmatest-09289-g8bb99c0 #1
-Call Trace:
- <IRQ> [<ffffffff80240b22>] warn_slowpath+0xf2/0x130
- [<ffffffff80647b70>] _spin_unlock+0x10/0x30
- [<ffffffff80537e75>] usb_hcd_link_urb_to_ep+0x75/0xc0
- [<ffffffff80647c22>] _spin_unlock_irqrestore+0x12/0x40
- [<ffffffff8055347f>] ohci_urb_enqueue+0x19f/0x7c0
- [<ffffffff80252f96>] queue_work+0x56/0x60
- [<ffffffff80237e10>] enqueue_task_fair+0x20/0x50
- [<ffffffff80539279>] usb_hcd_submit_urb+0x379/0xbc0
- [<ffffffff803b78c3>] cpumask_next_and+0x23/0x40
- [<ffffffff80235177>] find_busiest_group+0x207/0x8a0
- [<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50
- [<ffffffff803c7ea3>] check_unmap+0x203/0x490
- [<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50
- [<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0
- [<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0
- [<ffffffff8026df84>] handle_IRQ_event+0x34/0x70
- [<ffffffff8026ffe9>] handle_edge_irq+0xc9/0x150
- [<ffffffff8020e3ab>] do_IRQ+0xcb/0x1c0
- [<ffffffff8020c093>] ret_from_intr+0x0/0xa
- <EOI> <4>---[ end trace f6435a98e2a38c0e ]---
+example warning message may look like this::
+
+ WARNING: at /data2/repos/linux-2.6-iommu/lib/dma-debug.c:448
+ check_unmap+0x203/0x490()
+ Hardware name:
+ forcedeth 0000:00:08.0: DMA-API: device driver frees DMA memory with wrong
+ function [device address=0x00000000640444be] [size=66 bytes] [mapped as
+ single] [unmapped as page]
+ Modules linked in: nfsd exportfs bridge stp llc r8169
+ Pid: 0, comm: swapper Tainted: G W 2.6.28-dmatest-09289-g8bb99c0 #1
+ Call Trace:
+ <IRQ> [<ffffffff80240b22>] warn_slowpath+0xf2/0x130
+ [<ffffffff80647b70>] _spin_unlock+0x10/0x30
+ [<ffffffff80537e75>] usb_hcd_link_urb_to_ep+0x75/0xc0
+ [<ffffffff80647c22>] _spin_unlock_irqrestore+0x12/0x40
+ [<ffffffff8055347f>] ohci_urb_enqueue+0x19f/0x7c0
+ [<ffffffff80252f96>] queue_work+0x56/0x60
+ [<ffffffff80237e10>] enqueue_task_fair+0x20/0x50
+ [<ffffffff80539279>] usb_hcd_submit_urb+0x379/0xbc0
+ [<ffffffff803b78c3>] cpumask_next_and+0x23/0x40
+ [<ffffffff80235177>] find_busiest_group+0x207/0x8a0
+ [<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50
+ [<ffffffff803c7ea3>] check_unmap+0x203/0x490
+ [<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50
+ [<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0
+ [<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0
+ [<ffffffff8026df84>] handle_IRQ_event+0x34/0x70
+ [<ffffffff8026ffe9>] handle_edge_irq+0xc9/0x150
+ [<ffffffff8020e3ab>] do_IRQ+0xcb/0x1c0
+ [<ffffffff8020c093>] ret_from_intr+0x0/0xa
+ <EOI> <4>---[ end trace f6435a98e2a38c0e ]---
The driver developer can find the driver and the device including a stacktrace
of the DMA-API call which caused this warning.
@@ -637,43 +714,42 @@ details.
The debugfs directory for the DMA-API debugging code is called dma-api/. In
this directory the following files can currently be found:
- dma-api/all_errors This file contains a numeric value. If this
+=============================== ===============================================
+dma-api/all_errors This file contains a numeric value. If this
value is not equal to zero the debugging code
will print a warning for every error it finds
into the kernel log. Be careful with this
option, as it can easily flood your logs.
- dma-api/disabled This read-only file contains the character 'Y'
+dma-api/disabled This read-only file contains the character 'Y'
if the debugging code is disabled. This can
happen when it runs out of memory or if it was
disabled at boot time.
- dma-api/error_count This file is read-only and shows the total
+dma-api/error_count This file is read-only and shows the total
numbers of errors found.
- dma-api/num_errors The number in this file shows how many
+dma-api/num_errors The number in this file shows how many
warnings will be printed to the kernel log
before it stops. This number is initialized to
one at system boot and can be set by writing into
this file.
- dma-api/min_free_entries
- This read-only file can be read to get the
+dma-api/min_free_entries This read-only file can be read to get the
minimum number of free dma_debug_entries the
allocator has ever seen. If this value goes
down to zero the code will disable itself
because it is no longer reliable.
- dma-api/num_free_entries
- The current number of free dma_debug_entries
+dma-api/num_free_entries The current number of free dma_debug_entries
in the allocator.
- dma-api/driver-filter
- You can write a name of a driver into this file
+dma-api/driver-filter You can write a name of a driver into this file
to limit the debug output to requests from that
particular driver. Write an empty string to
that file to disable the filter and see
all errors again.
+=============================== ===============================================
If you have this code compiled into your kernel it will be enabled by default.
If you want to boot without the bookkeeping anyway you can provide
of preallocated entries is defined per architecture. If it is too low for you,
boot with 'dma_debug_entries=<your_desired_number>' to overwrite the
architectural default.
-void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+::
+
+ void
+ debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
The dma-debug interface debug_dma_mapping_error() is provided to debug
drivers that fail to check DMA mapping errors on addresses returned by
dma_map_single() and
@@ -702,4 +781,3 @@ the driver. When driver does unmap, debug_dma_unmap() checks the flag and if
this flag is still set, prints a warning message that includes the call
trace that leads up to the unmap. This interface can be called from
dma_mapping_error() routines to enable DMA mapping error check debugging.
-
diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt
index c41331398752..8c2b8be6e45b 100644
--- a/Documentation/DMA-ISA-LPC.txt
+++ b/Documentation/DMA-ISA-LPC.txt
@@ -1,19 +1,20 @@
- DMA with ISA and LPC devices
- ============================
+============================
+DMA with ISA and LPC devices
+============================
- Pierre Ossman <drzeus@drzeus.cx>
+:Author: Pierre Ossman <drzeus@drzeus.cx>
This document describes how to do DMA transfers using the old ISA DMA
controller. Even though ISA is more or less dead today the LPC bus
uses the same DMA system so it will be around for quite some time.
-Part I - Headers and dependencies
----------------------------------
+Headers and dependencies
+------------------------
-To do ISA style DMA you need to include two headers:
+To do ISA style DMA you need to include two headers::
-#include <linux/dma-mapping.h>
-#include <asm/dma.h>
+ #include <linux/dma-mapping.h>
+ #include <asm/dma.h>
The first is the generic DMA API used to convert virtual addresses to
bus addresses (see Documentation/DMA-API.txt for details).
@@ -23,8 +24,8 @@ this is not present on all platforms make sure you construct your
Kconfig to be dependent on ISA_DMA_API (not ISA) so that nobody tries
to build your driver on unsupported platforms.
-Part II - Buffer allocation
----------------------------
+Buffer allocation
+-----------------
The ISA DMA controller has some very strict requirements on which
memory it can access so extra care must be taken when allocating
@@ -42,13 +43,13 @@ requirements you pass the flag GFP_DMA to kmalloc.
Unfortunately the memory available for ISA DMA is scarce so unless you
allocate the memory during boot-up it's a good idea to also pass
-__GFP_REPEAT and __GFP_NOWARN to make the allocator try a bit harder.
+__GFP_RETRY_MAYFAIL and __GFP_NOWARN to make the allocator try a bit harder.
(This scarcity also means that you should allocate the buffer as
early as possible and not release it until the driver is unloaded.)
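+
+For example (a sketch; ``size`` is whatever your device needs)::
+
+	/* buffer reachable by the ISA DMA controller (below 16MB) */
+	buf = kmalloc(size, GFP_KERNEL | GFP_DMA |
+		      __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	if (!buf)
+		return -ENOMEM;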
-Part III - Address translation
-------------------------------
+Address translation
+-------------------
To translate the virtual address to a bus address, use the normal DMA
API. Do _not_ use isa_virt_to_phys() even though it does the same
@@ -61,8 +62,8 @@ Note: x86_64 had a broken DMA API when it came to ISA but has since
been fixed. If your arch has problems then fix the DMA API instead of
reverting to the ISA functions.
-Part IV - Channels
-------------------
+Channels
+--------
A normal ISA DMA controller has 8 channels. The lower four are for
8-bit transfers and the upper four are for 16-bit transfers.
@@ -80,8 +81,8 @@ The ability to use 16-bit or 8-bit transfers is _not_ up to you as a
driver author but depends on what the hardware supports. Check your
specs or test different channels.
-Part V - Transfer data
-----------------------
+Transfer data
+-------------
Now for the good stuff, the actual DMA transfer. :)
@@ -112,37 +113,37 @@ Once the DMA transfer is finished (or timed out) you should disable
the channel again. You should also check get_dma_residue() to make
sure that all data has been transferred.
-Example:
+Example::
-int flags, residue;
+ unsigned long flags;
+ int residue;
-flags = claim_dma_lock();
+ flags = claim_dma_lock();
-clear_dma_ff();
+ clear_dma_ff(channel);
-set_dma_mode(channel, DMA_MODE_WRITE);
-set_dma_addr(channel, phys_addr);
-set_dma_count(channel, num_bytes);
+ set_dma_mode(channel, DMA_MODE_WRITE);
+ set_dma_addr(channel, phys_addr);
+ set_dma_count(channel, num_bytes);
-dma_enable(channel);
+ enable_dma(channel);
-release_dma_lock(flags);
+ release_dma_lock(flags);
-while (!device_done());
+ while (!device_done());
-flags = claim_dma_lock();
+ flags = claim_dma_lock();
-dma_disable(channel);
+ disable_dma(channel);
-residue = dma_get_residue(channel);
-if (residue != 0)
- printk(KERN_ERR "driver: Incomplete DMA transfer!"
- " %d bytes left!\n", residue);
+ residue = get_dma_residue(channel);
+ if (residue != 0)
+ printk(KERN_ERR "driver: Incomplete DMA transfer!"
+ " %d bytes left!\n", residue);
-release_dma_lock(flags);
+ release_dma_lock(flags);
-Part VI - Suspend/resume
-------------------------
+Suspend/resume
+--------------
It is the driver's responsibility to make sure that the machine isn't
suspended while a DMA transfer is in progress. Also, all DMA settings
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 44c6bc496eee..8f8d97f65d73 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -1,5 +1,6 @@
- DMA attributes
- ==============
+==============
+DMA attributes
+==============
This document describes the semantics of the DMA attributes that are
defined in linux/dma-mapping.h.
@@ -108,6 +109,7 @@ This is a hint to the DMA-mapping subsystem that it's probably not worth
the time to try to allocate memory in a way that gives better TLB
efficiency (AKA it's not worth trying to build the mapping out of larger
pages). You might want to specify this if:
+
- You know that the accesses to this memory won't thrash the TLB.
You might know that the accesses are likely to be sequential or
that they aren't sequential but it's unlikely you'll ping-pong
@@ -121,11 +123,12 @@ pages). You might want to specify this if:
the mapping to have a short lifetime then it may be worth it to
optimize allocation (avoid coming up with large pages) instead of
getting the slight performance win of larger pages.
+
Setting this hint doesn't guarantee that you won't get huge pages, but it
means that we won't try quite as hard to get them.
-NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
-though ARM64 patches will likely be posted soon.
+.. note:: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
+ though ARM64 patches will likely be posted soon.
DMA_ATTR_NO_WARN
----------------
@@ -142,10 +145,10 @@ problem at all, depending on the implementation of the retry mechanism.
So, this provides a way for drivers to avoid those error messages on calls
where allocation failures are not a problem, and shouldn't bother the logs.
-NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
+.. note:: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
DMA_ATTR_PRIVILEGED
-------------------------------
+-------------------
Some advanced peripherals such as remote processors and GPUs perform
accesses to DMA buffers in both privileged "supervisor" and unprivileged
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 6962cab997ef..aa77a25a0940 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -1,9 +1,8 @@
+=====================
+The Linux IPMI Driver
+=====================
- The Linux IPMI Driver
- ---------------------
- Corey Minyard
- <minyard@mvista.com>
- <minyard@acm.org>
+:Author: Corey Minyard <minyard@mvista.com> / <minyard@acm.org>
The Intelligent Platform Management Interface, or IPMI, is a
standard for controlling intelligent devices that monitor a system.
@@ -141,7 +140,7 @@ Addressing
----------
The IPMI addressing works much like IP addresses; you have an overlay
-to handle the different address types. The overlay is:
+to handle the different address types. The overlay is::
struct ipmi_addr
{
@@ -153,7 +152,7 @@ to handle the different address types. The overlay is:
The addr_type determines what the address really is. The driver
currently understands two different types of addresses.
-"System Interface" addresses are defined as:
+"System Interface" addresses are defined as::
struct ipmi_system_interface_addr
{
@@ -166,7 +165,7 @@ straight to the BMC on the current card. The channel must be
IPMI_BMC_CHANNEL.
Messages that are destined to go out on the IPMB bus use the
-IPMI_IPMB_ADDR_TYPE address type. The format is
+IPMI_IPMB_ADDR_TYPE address type. The format is::
struct ipmi_ipmb_addr
{
@@ -184,16 +183,16 @@ spec.
Messages
--------
-Messages are defined as:
+Messages are defined as::
-struct ipmi_msg
-{
+ struct ipmi_msg
+ {
unsigned char netfn;
unsigned char lun;
unsigned char cmd;
unsigned char *data;
int data_len;
-};
+ };
The driver takes care of adding/stripping the header information. The
data portion is just the data to be sent (do NOT put addressing info
@@ -208,7 +207,7 @@ block of data, even when receiving messages. Otherwise the driver
will have no place to put the message.
Messages coming up from the message handler in kernelland will come in
-as:
+as::
struct ipmi_recv_msg
{
@@ -246,6 +245,7 @@ and the user should not have to care what type of SMI is below them.
Watching For Interfaces
+^^^^^^^^^^^^^^^^^^^^^^^
When your code comes up, the IPMI driver may or may not have detected
if IPMI devices exist. So you might have to defer your setup until
@@ -256,6 +256,7 @@ and tell you when they come and go.
Creating the User
+^^^^^^^^^^^^^^^^^
To use the message handler, you must first create a user using
ipmi_create_user. The interface number specifies which SMI you want
@@ -272,6 +273,7 @@ closing the device automatically destroys the user.
Messaging
+^^^^^^^^^
To send a message from kernel-land, the ipmi_request_settime() call does
pretty much all message handling. Most of the parameters are
@@ -321,6 +323,7 @@ though, since it is tricky to manage your own buffers.
Events and Incoming Commands
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The driver takes care of polling for IPMI events and receiving
commands (commands are messages that are not responses, they are
@@ -367,7 +370,7 @@ in the system. It discovers interfaces through a host of different
methods, depending on the system.
You can specify up to four interfaces on the module load line and
-control some module parameters:
+control some module parameters::
modprobe ipmi_si.o type=<type1>,<type2>....
ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
@@ -437,7 +440,7 @@ default is one. Setting to 0 is useful with the hotmod, but is
obviously only useful for modules.
When compiled into the kernel, the parameters can be specified on the
-kernel command line as:
+kernel command line as::
ipmi_si.type=<type1>,<type2>...
ipmi_si.ports=<port1>,<port2>... ipmi_si.addrs=<addr1>,<addr2>...
@@ -474,16 +477,22 @@ The driver supports a hot add and remove of interfaces. This way,
interfaces can be added or removed after the kernel is up and running.
This is done using /sys/modules/ipmi_si/parameters/hotmod, which is a
write-only parameter. You write a string to this interface. The string
-has the format:
+has the format::
+
<op1>[:op2[:op3...]]
-The "op"s are:
+
+The "op"s are::
+
add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
-You can specify more than one interface on the line. The "opt"s are:
+
+You can specify more than one interface on the line. The "opt"s are::
+
rsp=<regspacing>
rsi=<regsize>
rsh=<regshift>
irq=<irq>
ipmb=<ipmb slave addr>
+
and these have the same meanings as discussed above. Note that you
can also use this on the kernel command line for a more compact format
for specifying an interface. Note that when removing an interface,
@@ -496,7 +505,7 @@ The SMBus Driver (SSIF)
The SMBus driver allows up to 4 SMBus devices to be configured in the
system. By default, the driver will only register with something it
finds in DMI or ACPI tables. You can change this
-at module load time (for a module) with:
+at module load time (for a module) with::
modprobe ipmi_ssif.o
addr=<i2caddr1>[,<i2caddr2>[,...]]
@@ -535,7 +544,7 @@ the smb_addr parameter unless you have DMI or ACPI data to tell the
driver what to use.
When compiled into the kernel, the addresses can be specified on the
-kernel command line as:
+kernel command line as::
ipmb_ssif.addr=<i2caddr1>[,<i2caddr2>[...]]
ipmi_ssif.adapter=<adapter1>[,<adapter2>[...]]
@@ -565,9 +574,9 @@ Some users need more detailed information about a device, like where
the address came from or the raw base device for the IPMI interface.
You can use the IPMI smi_watcher to catch the IPMI interfaces as they
come or go, and to grab the information, you can use the function
-ipmi_get_smi_info(), which returns the following structure:
+ipmi_get_smi_info(), which returns the following structure::
-struct ipmi_smi_info {
+ struct ipmi_smi_info {
enum ipmi_addr_src addr_src;
struct device *dev;
union {
@@ -575,7 +584,7 @@ struct ipmi_smi_info {
void *acpi_handle;
} acpi_info;
} addr_info;
-};
+ };
Currently special info is returned only for SI_ACPI address
sources. Others may be added as necessary.
@@ -590,7 +599,7 @@ Watchdog
A watchdog timer is provided that implements the Linux-standard
watchdog timer interface. It has several module parameters that can be
-used to control it:
+used to control it::
modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
preaction=<preaction type> preop=<preop type> start_now=x
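As a sketch, a hypothetical configuration that resets the machine five
minutes after the last heartbeat (the values are illustrative only; see
the parameter descriptions for the full set)::

    modprobe ipmi_watchdog timeout=300 action=reset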
@@ -635,7 +644,7 @@ watchdog device is closed. The default value of nowayout is true
if the CONFIG_WATCHDOG_NOWAYOUT option is enabled, or false if not.
When compiled into the kernel, the kernel command line is available
-for configuring the watchdog:
+for configuring the watchdog::
ipmi_watchdog.timeout=<t> ipmi_watchdog.pretimeout=<t>
ipmi_watchdog.action=<action type>
@@ -675,6 +684,7 @@ also get a bunch of OEM events holding the panic string.
The field settings of the events are:
+
* Generator ID: 0x21 (kernel)
* EvM Rev: 0x03 (this event is formatted in IPMI 1.0 format)
* Sensor Type: 0x20 (OS critical stop sensor)
@@ -683,18 +693,20 @@ The field settings of the events are:
* Event Data 1: 0xa1 (Runtime stop in OEM bytes 2 and 3)
* Event data 2: second byte of panic string
* Event data 3: third byte of panic string
+
See the IPMI spec for the details of the event layout. This event is
always sent to the local management controller. It will handle routing
the message to the right place.
Other OEM events have the following format:
-Record ID (bytes 0-1): Set by the SEL.
-Record type (byte 2): 0xf0 (OEM non-timestamped)
-byte 3: The slave address of the card saving the panic
-byte 4: A sequence number (starting at zero)
-The rest of the bytes (11 bytes) are the panic string. If the panic string
-is longer than 11 bytes, multiple messages will be sent with increasing
-sequence numbers.
+
+* Record ID (bytes 0-1): Set by the SEL.
+* Record type (byte 2): 0xf0 (OEM non-timestamped)
+* byte 3: The slave address of the card saving the panic
+* byte 4: A sequence number (starting at zero)
+ The rest of the bytes (11 bytes) are the panic string. If the panic string
+ is longer than 11 bytes, multiple messages will be sent with increasing
+ sequence numbers.
Because you cannot send OEM events using the standard interface, this
function will attempt to find an SEL and add the events there. It
diff --git a/Documentation/IRQ-affinity.txt b/Documentation/IRQ-affinity.txt
index 01a675175a36..29da5000836a 100644
--- a/Documentation/IRQ-affinity.txt
+++ b/Documentation/IRQ-affinity.txt
@@ -1,8 +1,11 @@
+================
+SMP IRQ affinity
+================
+
ChangeLog:
- Started by Ingo Molnar <mingo@redhat.com>
- Update by Max Krasnyansky <maxk@qualcomm.com>
+ - Started by Ingo Molnar <mingo@redhat.com>
+ - Update by Max Krasnyansky <maxk@qualcomm.com>
-SMP IRQ affinity
/proc/irq/IRQ#/smp_affinity and /proc/irq/IRQ#/smp_affinity_list specify
which target CPUs are permitted for a given IRQ source. It's a bitmask
@@ -16,50 +19,52 @@ will be set to the default mask. It can then be changed as described above.
Default mask is 0xffffffff.
Here is an example of restricting IRQ44 (eth1) to CPU0-3 then restricting
-it to CPU4-7 (this is an 8-CPU SMP box):
+it to CPU4-7 (this is an 8-CPU SMP box)::
-[root@moon 44]# cd /proc/irq/44
-[root@moon 44]# cat smp_affinity
-ffffffff
+ [root@moon 44]# cd /proc/irq/44
+ [root@moon 44]# cat smp_affinity
+ ffffffff
-[root@moon 44]# echo 0f > smp_affinity
-[root@moon 44]# cat smp_affinity
-0000000f
-[root@moon 44]# ping -f h
-PING hell (195.4.7.3): 56 data bytes
-...
---- hell ping statistics ---
-6029 packets transmitted, 6027 packets received, 0% packet loss
-round-trip min/avg/max = 0.1/0.1/0.4 ms
-[root@moon 44]# cat /proc/interrupts | grep 'CPU\|44:'
- CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
- 44: 1068 1785 1785 1783 0 0 0 0 IO-APIC-level eth1
+ [root@moon 44]# echo 0f > smp_affinity
+ [root@moon 44]# cat smp_affinity
+ 0000000f
+ [root@moon 44]# ping -f h
+ PING hell (195.4.7.3): 56 data bytes
+ ...
+ --- hell ping statistics ---
+ 6029 packets transmitted, 6027 packets received, 0% packet loss
+ round-trip min/avg/max = 0.1/0.1/0.4 ms
+ [root@moon 44]# cat /proc/interrupts | grep 'CPU\|44:'
+ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
+ 44: 1068 1785 1785 1783 0 0 0 0 IO-APIC-level eth1
As can be seen from the line above, IRQ44 was delivered only to the first four
processors (0-3).
Now let's restrict that IRQ to CPUs 4-7.
-[root@moon 44]# echo f0 > smp_affinity
-[root@moon 44]# cat smp_affinity
-000000f0
-[root@moon 44]# ping -f h
-PING hell (195.4.7.3): 56 data bytes
-..
---- hell ping statistics ---
-2779 packets transmitted, 2777 packets received, 0% packet loss
-round-trip min/avg/max = 0.1/0.5/585.4 ms
-[root@moon 44]# cat /proc/interrupts | 'CPU\|44:'
- CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
- 44: 1068 1785 1785 1783 1784 1069 1070 1069 IO-APIC-level eth1
+::
+
+ [root@moon 44]# echo f0 > smp_affinity
+ [root@moon 44]# cat smp_affinity
+ 000000f0
+ [root@moon 44]# ping -f h
+ PING hell (195.4.7.3): 56 data bytes
+ ..
+ --- hell ping statistics ---
+ 2779 packets transmitted, 2777 packets received, 0% packet loss
+ round-trip min/avg/max = 0.1/0.5/585.4 ms
+  [root@moon 44]# cat /proc/interrupts | grep 'CPU\|44:'
+ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
+ 44: 1068 1785 1785 1783 1784 1069 1070 1069 IO-APIC-level eth1
This time around IRQ44 was delivered only to the last four processors,
i.e. the counters for CPU0-3 did not change.
-Here is an example of limiting that same irq (44) to cpus 1024 to 1031:
+Here is an example of limiting that same irq (44) to cpus 1024 to 1031::
-[root@moon 44]# echo 1024-1031 > smp_affinity_list
-[root@moon 44]# cat smp_affinity_list
-1024-1031
+ [root@moon 44]# echo 1024-1031 > smp_affinity_list
+ [root@moon 44]# cat smp_affinity_list
+ 1024-1031
Note that doing this with a bitmask would require 32 bitmasks of zero
to follow the pertinent one.
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 1f246eb25ca5..4a1cd7645d85 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -1,4 +1,6 @@
-irq_domain interrupt number mapping library
+===============================================
+The irq_domain interrupt number mapping library
+===============================================
The current design of the Linux kernel uses a single large number
space where each separate IRQ source is assigned a different number.
@@ -36,7 +38,9 @@ irq_domain also implements translation from an abstract irq_fwspec
structure to hwirq numbers (Device Tree and ACPI GSI so far), and can
be easily extended to support other IRQ topology data sources.
-=== irq_domain usage ===
+irq_domain usage
+================
+
An interrupt controller driver creates and registers an irq_domain by
calling one of the irq_domain_add_*() functions (each mapping method
has a different allocator function, more on that later). The function
@@ -62,15 +66,21 @@ If the driver has the Linux IRQ number or the irq_data pointer, and
needs to know the associated hwirq number (such as in the irq_chip
callbacks) then it can be directly obtained from irq_data->hwirq.
-=== Types of irq_domain mappings ===
+Types of irq_domain mappings
+============================
+
There are several mechanisms available for reverse mapping from hwirq
to Linux irq, and each mechanism uses a different allocation function.
Which reverse map type should be used depends on the use case. Each
of the reverse map types is described below:
-==== Linear ====
-irq_domain_add_linear()
-irq_domain_create_linear()
+Linear
+------
+
+::
+
+ irq_domain_add_linear()
+ irq_domain_create_linear()
The linear reverse map maintains a fixed size table indexed by the
hwirq number. When a hwirq is mapped, an irq_desc is allocated for
@@ -89,9 +99,13 @@ accepts a more general abstraction 'struct fwnode_handle'.
The majority of drivers should use the linear map.
-==== Tree ====
-irq_domain_add_tree()
-irq_domain_create_tree()
+Tree
+----
+
+::
+
+ irq_domain_add_tree()
+ irq_domain_create_tree()
The irq_domain maintains a radix tree map from hwirq numbers to Linux
IRQs. When an hwirq is mapped, an irq_desc is allocated and the
@@ -109,8 +123,12 @@ accepts a more general abstraction 'struct fwnode_handle'.
Very few drivers should need this mapping.
-==== No Map ===-
-irq_domain_add_nomap()
+No Map
+------
+
+::
+
+ irq_domain_add_nomap()
The No Map mapping is to be used when the hwirq number is
programmable in the hardware. In this case it is best to program the
@@ -121,10 +139,14 @@ Linux IRQ number into the hardware.
Most drivers cannot use this mapping.
-==== Legacy ====
-irq_domain_add_simple()
-irq_domain_add_legacy()
-irq_domain_add_legacy_isa()
+Legacy
+------
+
+::
+
+ irq_domain_add_simple()
+ irq_domain_add_legacy()
+ irq_domain_add_legacy_isa()
The Legacy mapping is a special case for drivers that already have a
range of irq_descs allocated for the hwirqs. It is used when the
@@ -163,14 +185,17 @@ that the driver using the simple domain call irq_create_mapping()
before any irq_find_mapping() since the latter will actually work
for the static IRQ assignment case.
-==== Hierarchy IRQ domain ====
+Hierarchy IRQ domain
+--------------------
+
On some architectures, there may be multiple interrupt controllers
involved in delivering an interrupt from the device to the target CPU.
-Let's look at a typical interrupt delivering path on x86 platforms:
+Let's look at a typical interrupt delivery path on x86 platforms::
-Device --> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU
+ Device --> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU
There are three interrupt controllers involved:
+
1) IOAPIC controller
2) Interrupt remapping controller
3) Local APIC controller
@@ -180,7 +205,8 @@ hardware architecture, an irq_domain data structure is built for each
interrupt controller and those irq_domains are organized into hierarchy.
When building the irq_domain hierarchy, the irq_domain nearest the device is
the child and the irq_domain nearest the CPU is the parent. So a hierarchy structure
-as below will be built for the example above.
+as below will be built for the example above::
+
CPU Vector irq_domain (root irq_domain to manage CPU vectors)
^
|
@@ -190,6 +216,7 @@ as below will be built for the example above.
IOAPIC irq_domain (manage IOAPIC delivery entries/pins)
There are four major interfaces to use hierarchy irq_domain:
+
1) irq_domain_alloc_irqs(): allocate IRQ descriptors and interrupt
controller related resources to deliver these interrupts.
2) irq_domain_free_irqs(): free IRQ descriptors and interrupt controller
@@ -199,7 +226,8 @@ There are four major interfaces to use hierarchy irq_domain:
4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
to stop delivering the interrupt.
-Following changes are needed to support hierarchy irq_domain.
+The following changes are needed to support hierarchy irq_domain:
+
1) a new field 'parent' is added to struct irq_domain; it's used to
maintain irq_domain hierarchy information.
2) a new field 'parent_data' is added to struct irq_data; it's used to
@@ -223,6 +251,7 @@ software architecture.
For an interrupt controller driver to support hierarchy irq_domain, it
needs to:
+
1) Implement irq_domain_ops.alloc and irq_domain_ops.free
2) Optionally implement irq_domain_ops.activate and
irq_domain_ops.deactivate.
diff --git a/Documentation/IRQ.txt b/Documentation/IRQ.txt
index 1011e7175021..4273806a606b 100644
--- a/Documentation/IRQ.txt
+++ b/Documentation/IRQ.txt
@@ -1,4 +1,6 @@
+===============
What is an IRQ?
+===============
An IRQ is an interrupt request from a device.
Currently they can come in over a pin, or over a packet.
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
index 49585b6e1ea2..9dae6b47e398 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -1,3 +1,4 @@
+===================
Linux IOMMU Support
===================
@@ -9,11 +10,11 @@ This guide gives a quick cheat sheet for some basic understanding.
Some Keywords
-DMAR - DMA remapping
-DRHD - DMA Remapping Hardware Unit Definition
-RMRR - Reserved memory Region Reporting Structure
-ZLR - Zero length reads from PCI devices
-IOVA - IO Virtual address.
+- DMAR - DMA remapping
+- DRHD - DMA Remapping Hardware Unit Definition
+- RMRR - Reserved memory Region Reporting Structure
+- ZLR - Zero length reads from PCI devices
+- IOVA - IO Virtual address.
Basic stuff
-----------
@@ -33,7 +34,7 @@ devices that need to access these regions. OS is expected to setup
unity mappings for these regions so that these devices can access them.
How is IOVA generated?
----------------------
+----------------------
Well-behaved drivers call pci_map_*() before sending a command to a device
that needs to perform DMA. Once the DMA is completed and the mapping is no longer
@@ -82,14 +83,14 @@ in ACPI.
ACPI: DMAR (v001 A M I OEMDMAR 0x00000001 MSFT 0x00000097) @ 0x000000007f5b5ef0
When the DMAR table is being processed and initialized by ACPI, the kernel
prints the DMAR locations
-and any RMRR's processed.
+and any RMRR's processed::
-ACPI DMAR:Host address width 36
-ACPI DMAR:DRHD (flags: 0x00000000)base: 0x00000000fed90000
-ACPI DMAR:DRHD (flags: 0x00000000)base: 0x00000000fed91000
-ACPI DMAR:DRHD (flags: 0x00000001)base: 0x00000000fed93000
-ACPI DMAR:RMRR base: 0x00000000000ed000 end: 0x00000000000effff
-ACPI DMAR:RMRR base: 0x000000007f600000 end: 0x000000007fffffff
+ ACPI DMAR:Host address width 36
+ ACPI DMAR:DRHD (flags: 0x00000000)base: 0x00000000fed90000
+ ACPI DMAR:DRHD (flags: 0x00000000)base: 0x00000000fed91000
+ ACPI DMAR:DRHD (flags: 0x00000001)base: 0x00000000fed93000
+ ACPI DMAR:RMRR base: 0x00000000000ed000 end: 0x00000000000effff
+ ACPI DMAR:RMRR base: 0x000000007f600000 end: 0x000000007fffffff
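If the boot messages have scrolled away, the same lines can usually be
recovered from the kernel log with something like::

    dmesg | grep -i dmar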
When DMAR is enabled for use, you will notice..
@@ -98,10 +99,12 @@ PCI-DMA: Using DMAR IOMMU
Fault reporting
---------------
-DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
-DMAR:[fault reason 05] PTE Write access is not set
-DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
-DMAR:[fault reason 05] PTE Write access is not set
+::
+
+ DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
+ DMAR:[fault reason 05] PTE Write access is not set
+ DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
+ DMAR:[fault reason 05] PTE Write access is not set
TBD
----
diff --git a/Documentation/SAK.txt b/Documentation/SAK.txt
index 74be14679ed8..260e1d3687bd 100644
--- a/Documentation/SAK.txt
+++ b/Documentation/SAK.txt
@@ -1,5 +1,9 @@
-Linux 2.4.2 Secure Attention Key (SAK) handling
-18 March 2001, Andrew Morton
+=========================================
+Linux Secure Attention Key (SAK) handling
+=========================================
+
+:Date: 18 March 2001
+:Author: Andrew Morton
An operating system's Secure Attention Key is a security tool which is
provided as protection against trojan password capturing programs. It
@@ -13,7 +17,7 @@ this sequence. It is only available if the kernel was compiled with
sysrq support.
The proper way of generating a SAK is to define the key sequence using
-`loadkeys'. This will work whether or not sysrq support is compiled
+``loadkeys``. This will work whether or not sysrq support is compiled
into the kernel.
SAK works correctly when the keyboard is in raw mode. This means that
@@ -25,64 +29,63 @@ What key sequence should you use? Well, CTRL-ALT-DEL is used to reboot
the machine. CTRL-ALT-BACKSPACE is magical to the X server. We'll
choose CTRL-ALT-PAUSE.
-In your rc.sysinit (or rc.local) file, add the command
+In your rc.sysinit (or rc.local) file, add the command::
echo "control alt keycode 101 = SAK" | /bin/loadkeys
And that's it! Only the superuser may reprogram the SAK key.
-NOTES
-=====
+.. note::
-1: Linux SAK is said to be not a "true SAK" as is required by
- systems which implement C2 level security. This author does not
- know why.
+ 1. Linux SAK is said to be not a "true SAK" as is required by
+ systems which implement C2 level security. This author does not
+ know why.
-2: On the PC keyboard, SAK kills all applications which have
- /dev/console opened.
+ 2. On the PC keyboard, SAK kills all applications which have
+ /dev/console opened.
- Unfortunately this includes a number of things which you don't
- actually want killed. This is because these applications are
- incorrectly holding /dev/console open. Be sure to complain to your
- Linux distributor about this!
+ Unfortunately this includes a number of things which you don't
+ actually want killed. This is because these applications are
+ incorrectly holding /dev/console open. Be sure to complain to your
+ Linux distributor about this!
- You can identify processes which will be killed by SAK with the
- command
+ You can identify processes which will be killed by SAK with the
+ command::
# ls -l /proc/[0-9]*/fd/* | grep console
l-wx------ 1 root root 64 Mar 18 00:46 /proc/579/fd/0 -> /dev/console
- Then:
+ Then::
# ps aux|grep 579
root 579 0.0 0.1 1088 436 ? S 00:43 0:00 gpm -t ps/2
- So `gpm' will be killed by SAK. This is a bug in gpm. It should
- be closing standard input. You can work around this by finding the
- initscript which launches gpm and changing it thusly:
+ So ``gpm`` will be killed by SAK. This is a bug in gpm. It should
+ be closing standard input. You can work around this by finding the
+ initscript which launches gpm and changing it thusly:
- Old:
+ Old::
daemon gpm
- New:
+ New::
daemon gpm < /dev/null
- Vixie cron also seems to have this problem, and needs the same treatment.
+ Vixie cron also seems to have this problem, and needs the same treatment.
- Also, one prominent Linux distribution has the following three
- lines in its rc.sysinit and rc scripts:
+ Also, one prominent Linux distribution has the following three
+ lines in its rc.sysinit and rc scripts::
exec 3<&0
exec 4>&1
exec 5>&2
- These commands cause *all* daemons which are launched by the
- initscripts to have file descriptors 3, 4 and 5 attached to
- /dev/console. So SAK kills them all. A workaround is to simply
- delete these lines, but this may cause system management
- applications to malfunction - test everything well.
+ These commands cause **all** daemons which are launched by the
+ initscripts to have file descriptors 3, 4 and 5 attached to
+ /dev/console. So SAK kills them all. A workaround is to simply
+ delete these lines, but this may cause system management
+ applications to malfunction - test everything well.
diff --git a/Documentation/SM501.txt b/Documentation/SM501.txt
index 561826f82093..882507453ba4 100644
--- a/Documentation/SM501.txt
+++ b/Documentation/SM501.txt
@@ -1,7 +1,10 @@
- SM501 Driver
- ============
+.. include:: <isonum.txt>
-Copyright 2006, 2007 Simtec Electronics
+============
+SM501 Driver
+============
+
+:Copyright: |copy| 2006, 2007 Simtec Electronics
The Silicon Motion SM501 multimedia companion chip is a multifunction device
which may provide numerous interfaces including a USB host controller, USB gadget,
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
index a9259b562d5c..c0ce64d75bbf 100644
--- a/Documentation/bcache.txt
+++ b/Documentation/bcache.txt
@@ -1,10 +1,15 @@
+============================
+A block layer cache (bcache)
+============================
+
Say you've got a big slow raid 6, and an ssd or three. Wouldn't it be
nice if you could use them as cache... Hence bcache.
Wiki and git repositories are at:
- http://bcache.evilpiepirate.org
- http://evilpiepirate.org/git/linux-bcache.git
- http://evilpiepirate.org/git/bcache-tools.git
+
+ - http://bcache.evilpiepirate.org
+ - http://evilpiepirate.org/git/linux-bcache.git
+ - http://evilpiepirate.org/git/bcache-tools.git
It's designed around the performance characteristics of SSDs - it only allocates
in erase block sized buckets, and it uses a hybrid btree/log to track cached
@@ -37,17 +42,19 @@ to be flushed.
Getting started:
You'll need make-bcache from the bcache-tools repository. Both the cache device
-and backing device must be formatted before use.
+and backing device must be formatted before use::
+
make-bcache -B /dev/sdb
make-bcache -C /dev/sdc
make-bcache has the ability to format multiple devices at the same time - if
you format your backing devices and cache device at the same time, you won't
-have to manually attach:
+have to manually attach::
+
make-bcache -B /dev/sda /dev/sdb -C /dev/sdc
bcache-tools now ships udev rules, and bcache devices are known to the kernel
-immediately. Without udev, you can manually register devices like this:
+immediately. Without udev, you can manually register devices like this::
echo /dev/sdb > /sys/fs/bcache/register
echo /dev/sdc > /sys/fs/bcache/register
@@ -60,16 +67,16 @@ slow devices as bcache backing devices without a cache, and you can choose to ad
a caching device later.
See the 'Attaching' section below.
-The devices show up as:
+The devices show up as::
/dev/bcache<N>
-As well as (with udev):
+As well as (with udev)::
/dev/bcache/by-uuid/<uuid>
/dev/bcache/by-label/<label>
-To get started:
+To get started::
mkfs.ext4 /dev/bcache0
mount /dev/bcache0 /mnt
@@ -81,13 +88,13 @@ Cache devices are managed as sets; multiple caches per set isn't supported yet
but will allow for mirroring of metadata and dirty data in the future. Your new
cache set shows up as /sys/fs/bcache/<UUID>
-ATTACHING
+Attaching
---------
After your cache device and backing device are registered, the backing device
must be attached to your cache set to enable caching. Attaching a backing
device to a cache set is done thusly, with the UUID of the cache set in
-/sys/fs/bcache:
+/sys/fs/bcache::
echo <CSET-UUID> > /sys/block/bcache0/bcache/attach
@@ -97,7 +104,7 @@ your bcache devices. If a backing device has data in a cache somewhere, the
important if you have writeback caching turned on.
If you're booting up and your cache device is gone and never coming back, you
-can force run the backing device:
+can force run the backing device::
echo 1 > /sys/block/sdb/bcache/running
@@ -110,7 +117,7 @@ but all the cached data will be invalidated. If there was dirty data in the
cache, don't expect the filesystem to be recoverable - you will have massive
filesystem corruption, though ext4's fsck does work miracles.
-ERROR HANDLING
+Error Handling
--------------
Bcache tries to transparently handle IO errors to/from the cache device without
@@ -134,25 +141,27 @@ the backing devices to passthrough mode.
read some of the dirty data, though.
-HOWTO/COOKBOOK
+Howto/cookbook
--------------
A) Starting a bcache with a missing caching device
If registering the backing device doesn't help because it's already there, you just need
-to force it to run without the cache:
+to force it to run without the cache::
+
host:~# echo /dev/sdb1 > /sys/fs/bcache/register
[ 119.844831] bcache: register_bcache() error opening /dev/sdb1: device already registered
Next, you try to register your caching device if it's present. However,
if it's absent, or registration fails for some reason, you can still
-start your bcache without its cache, like so:
+start your bcache without its cache, like so::
+
host:/sys/block/sdb/sdb1/bcache# echo 1 > running
Note that this may cause data loss if you were running in writeback mode.
-B) Bcache does not find its cache
+B) Bcache does not find its cache::
host:/sys/block/md5/bcache# echo 0226553a-37cf-41d5-b3ce-8b1e944543a8 > attach
[ 1933.455082] bcache: bch_cached_dev_attach() Couldn't find uuid for md5 in set
@@ -160,7 +169,8 @@ B) Bcache does not find its cache
[ 1933.478179] : cache set not found
In this case, the caching device was simply not registered at boot
-or disappeared and came back, and needs to be (re-)registered:
+or disappeared and came back, and needs to be (re-)registered::
+
host:/sys/block/md5/bcache# echo /dev/sdh2 > /sys/fs/bcache/register
@@ -180,7 +190,8 @@ device is still available at an 8KiB offset. So either via a loopdev
of the backing device created with --offset 8K, or any value defined by
--data-offset when you originally formatted bcache with `make-bcache`.
-For example:
+For example::
+
losetup -o 8192 /dev/loop0 /dev/your_bcache_backing_dev
This should present your unmodified backing device data in /dev/loop0
@@ -191,33 +202,38 @@ cache device without loosing data.
E) Wiping a cache device
-host:~# wipefs -a /dev/sdh2
-16 bytes were erased at offset 0x1018 (bcache)
-they were: c6 85 73 f6 4e 1a 45 ca 82 65 f5 7f 48 ba 6d 81
+::
+
+ host:~# wipefs -a /dev/sdh2
+ 16 bytes were erased at offset 0x1018 (bcache)
+ they were: c6 85 73 f6 4e 1a 45 ca 82 65 f5 7f 48 ba 6d 81
+
+After you boot back with bcache enabled, you recreate the cache and attach it::
-After you boot back with bcache enabled, you recreate the cache and attach it:
-host:~# make-bcache -C /dev/sdh2
-UUID: 7be7e175-8f4c-4f99-94b2-9c904d227045
-Set UUID: 5bc072a8-ab17-446d-9744-e247949913c1
-version: 0
-nbuckets: 106874
-block_size: 1
-bucket_size: 1024
-nr_in_set: 1
-nr_this_dev: 0
-first_bucket: 1
-[ 650.511912] bcache: run_cache_set() invalidating existing data
-[ 650.549228] bcache: register_cache() registered cache device sdh2
+ host:~# make-bcache -C /dev/sdh2
+ UUID: 7be7e175-8f4c-4f99-94b2-9c904d227045
+ Set UUID: 5bc072a8-ab17-446d-9744-e247949913c1
+ version: 0
+ nbuckets: 106874
+ block_size: 1
+ bucket_size: 1024
+ nr_in_set: 1
+ nr_this_dev: 0
+ first_bucket: 1
+ [ 650.511912] bcache: run_cache_set() invalidating existing data
+ [ 650.549228] bcache: register_cache() registered cache device sdh2
-start backing device with missing cache:
-host:/sys/block/md5/bcache# echo 1 > running
+start backing device with missing cache::
-attach new cache:
-host:/sys/block/md5/bcache# echo 5bc072a8-ab17-446d-9744-e247949913c1 > attach
-[ 865.276616] bcache: bch_cached_dev_attach() Caching md5 as bcache0 on set 5bc072a8-ab17-446d-9744-e247949913c1
+ host:/sys/block/md5/bcache# echo 1 > running
+attach new cache::
-F) Remove or replace a caching device
+ host:/sys/block/md5/bcache# echo 5bc072a8-ab17-446d-9744-e247949913c1 > attach
+ [ 865.276616] bcache: bch_cached_dev_attach() Caching md5 as bcache0 on set 5bc072a8-ab17-446d-9744-e247949913c1
+
+
+F) Remove or replace a caching device::
host:/sys/block/sda/sda7/bcache# echo 1 > detach
[ 695.872542] bcache: cached_dev_detach_finish() Caching disabled for sda7
@@ -226,13 +242,15 @@ F) Remove or replace a caching device
wipefs: error: /dev/nvme0n1p4: probing initialization failed: Device or resource busy
Oops, it's disabled but not unregistered, so it's still protected.
-We need to go and unregister it:
+We need to go and unregister it::
+
host:/sys/fs/bcache/b7ba27a1-2398-4649-8ae3-0959f57ba128# ls -l cache0
lrwxrwxrwx 1 root root 0 Feb 25 18:33 cache0 -> ../../../devices/pci0000:00/0000:00:1d.0/0000:70:00.0/nvme/nvme0/nvme0n1/nvme0n1p4/bcache/
host:/sys/fs/bcache/b7ba27a1-2398-4649-8ae3-0959f57ba128# echo 1 > stop
kernel: [ 917.041908] bcache: cache_set_free() Cache set b7ba27a1-2398-4649-8ae3-0959f57ba128 unregistered
-Now we can wipe it:
+Now we can wipe it::
+
host:~# wipefs -a /dev/nvme0n1p4
/dev/nvme0n1p4: 16 bytes were erased at offset 0x00001018 (bcache): c6 85 73 f6 4e 1a 45 ca 82 65 f5 7f 48 ba 6d 81
@@ -252,40 +270,44 @@ if there are any active backing or caching devices left on it:
1) Is it present in /dev/bcache* ? (there are times where it won't be)
-If so, it's easy:
+ If so, it's easy::
+
host:/sys/block/bcache0/bcache# echo 1 > stop
-2) But if your backing device is gone, this won't work:
+2) But if your backing device is gone, this won't work::
+
host:/sys/block/bcache0# cd bcache
bash: cd: bcache: No such file or directory
-In this case, you may have to unregister the dmcrypt block device that
-references this bcache to free it up:
+ In this case, you may have to unregister the dmcrypt block device that
+ references this bcache to free it up::
+
host:~# dmsetup remove oldds1
bcache: bcache_device_free() bcache0 stopped
bcache: cache_set_free() Cache set 5bc072a8-ab17-446d-9744-e247949913c1 unregistered
-This causes the backing bcache to be removed from /sys/fs/bcache and
-then it can be reused. This would be true of any block device stacking
-where bcache is a lower device.
+ This causes the backing bcache to be removed from /sys/fs/bcache and
+ then it can be reused. This would be true of any block device stacking
+ where bcache is a lower device.
+
+3) In other cases, you can also look in /sys/fs/bcache/::
-3) In other cases, you can also look in /sys/fs/bcache/:
+ host:/sys/fs/bcache# ls -l */{cache?,bdev?}
+ lrwxrwxrwx 1 root root 0 Mar 5 09:39 0226553a-37cf-41d5-b3ce-8b1e944543a8/bdev1 -> ../../../devices/virtual/block/dm-1/bcache/
+ lrwxrwxrwx 1 root root 0 Mar 5 09:39 0226553a-37cf-41d5-b3ce-8b1e944543a8/cache0 -> ../../../devices/virtual/block/dm-4/bcache/
+ lrwxrwxrwx 1 root root 0 Mar 5 09:39 5bc072a8-ab17-446d-9744-e247949913c1/cache0 -> ../../../devices/pci0000:00/0000:00:01.0/0000:01:00.0/ata10/host9/target9:0:0/9:0:0:0/block/sdl/sdl2/bcache/
-host:/sys/fs/bcache# ls -l */{cache?,bdev?}
-lrwxrwxrwx 1 root root 0 Mar 5 09:39 0226553a-37cf-41d5-b3ce-8b1e944543a8/bdev1 -> ../../../devices/virtual/block/dm-1/bcache/
-lrwxrwxrwx 1 root root 0 Mar 5 09:39 0226553a-37cf-41d5-b3ce-8b1e944543a8/cache0 -> ../../../devices/virtual/block/dm-4/bcache/
-lrwxrwxrwx 1 root root 0 Mar 5 09:39 5bc072a8-ab17-446d-9744-e247949913c1/cache0 -> ../../../devices/pci0000:00/0000:00:01.0/0000:01:00.0/ata10/host9/target9:0:0/9:0:0:0/block/sdl/sdl2/bcache/
+ The device names will show which UUID is relevant, cd in that directory
+ and stop the cache::
-The device names will show which UUID is relevant, cd in that directory
-and stop the cache:
host:/sys/fs/bcache/5bc072a8-ab17-446d-9744-e247949913c1# echo 1 > stop
-This will free up bcache references and let you reuse the partition for
-other purposes.
+ This will free up bcache references and let you reuse the partition for
+ other purposes.
-TROUBLESHOOTING PERFORMANCE
+Troubleshooting performance
---------------------------
Bcache has a bunch of config options and tunables. The defaults are intended to
@@ -301,11 +323,13 @@ want for getting the best possible numbers when benchmarking.
raid stripe size to get the disk multiples that you would like.
For example: If you have a 64k stripe size, then the following offset
- would provide alignment for many common RAID5 data spindle counts:
+ would provide alignment for many common RAID5 data spindle counts::
+
64k * 2*2*2*3*3*5*7 bytes = 161280k
That space is wasted, but for only 157.5MB you can grow your RAID 5
- volume to the following data-spindle counts without re-aligning:
+ volume to the following data-spindle counts without re-aligning::
+
3,4,5,6,7,8,9,10,12,14,15,18,20,21 ...
- Bad write performance
@@ -313,9 +337,9 @@ want for getting the best possible numbers when benchmarking.
If write performance is not what you expected, you probably wanted to be
running in writeback mode, which isn't the default (not due to a lack of
maturity, but simply because in writeback mode you'll lose data if something
- happens to your SSD)
+ happens to your SSD)::
- # echo writeback > /sys/block/bcache0/bcache/cache_mode
+ # echo writeback > /sys/block/bcache0/bcache/cache_mode
- Bad performance, or traffic not going to the SSD that you'd expect
@@ -325,13 +349,13 @@ want for getting the best possible numbers when benchmarking.
accessed data out of your cache.
But if you want to benchmark reads from cache, and you start out with fio
- writing an 8 gigabyte test file - so you want to disable that.
+  writing an 8 gigabyte test file, you will want to disable that::
- # echo 0 > /sys/block/bcache0/bcache/sequential_cutoff
+ # echo 0 > /sys/block/bcache0/bcache/sequential_cutoff
- To set it back to the default (4 mb), do
+  To set it back to the default (4M), do::
- # echo 4M > /sys/block/bcache0/bcache/sequential_cutoff
+ # echo 4M > /sys/block/bcache0/bcache/sequential_cutoff
- Traffic's still going to the spindle/still getting cache misses
@@ -344,10 +368,10 @@ want for getting the best possible numbers when benchmarking.
throttles traffic if the latency exceeds a threshold (it does this by
cranking down the sequential bypass).
- You can disable this if you need to by setting the thresholds to 0:
+ You can disable this if you need to by setting the thresholds to 0::
- # echo 0 > /sys/fs/bcache/<cache set>/congested_read_threshold_us
- # echo 0 > /sys/fs/bcache/<cache set>/congested_write_threshold_us
+ # echo 0 > /sys/fs/bcache/<cache set>/congested_read_threshold_us
+ # echo 0 > /sys/fs/bcache/<cache set>/congested_write_threshold_us
The default is 2000 us (2 milliseconds) for reads, and 20000 for writes.
@@ -369,7 +393,7 @@ want for getting the best possible numbers when benchmarking.
a fix for the issue there).
-SYSFS - BACKING DEVICE
+Sysfs - backing device
----------------------
Available at /sys/block/<bdev>/bcache, /sys/block/bcache*/bcache and
@@ -454,7 +478,8 @@ writeback_running
still be added to the cache until it is mostly full; only meant for
benchmarking. Defaults to on.
-SYSFS - BACKING DEVICE STATS:
+Sysfs - backing device stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are directories with these numbers for a running total, as well as
versions that decay over the past day, hour and 5 minutes; they're also
aggregated in the cache set directory.
bypassed
Amount of IO (both reads and writes) that has bypassed the cache
-cache_hits
-cache_misses
-cache_hit_ratio
+cache_hits, cache_misses, cache_hit_ratio
Hits and misses are counted per individual IO as bcache sees them; a
partial hit is counted as a miss.
-cache_bypass_hits
-cache_bypass_misses
+cache_bypass_hits, cache_bypass_misses
Hits and misses for IO that is intended to skip the cache are still counted,
but broken out here.
@@ -482,7 +504,8 @@ cache_miss_collisions
cache_readaheads
Count of times readahead occurred.
-SYSFS - CACHE SET:
+Sysfs - cache set
+~~~~~~~~~~~~~~~~~
Available at /sys/fs/bcache/<cset-uuid>
@@ -520,8 +543,7 @@ flash_vol_create
Echoing a size to this file (in human readable units, k/M/G) creates a thinly
provisioned volume backed by the cache set.
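For example, a hypothetical 100G thin volume would be created with::

    echo 100G > /sys/fs/bcache/<cset-uuid>/flash_vol_create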
-io_error_halflife
-io_error_limit
+io_error_halflife, io_error_limit
These determine how many errors we accept before disabling the cache.
Each error is decayed by the half life (in # ios). If the decaying count
reaches io_error_limit, dirty data is written out and the cache is disabled.
@@ -545,7 +567,8 @@ unregister
Detaches all backing devices and closes the cache devices; if dirty data is
present it will disable writeback caching and wait for it to be flushed.
-SYSFS - CACHE SET INTERNAL:
+Sysfs - cache set internal
+~~~~~~~~~~~~~~~~~~~~~~~~~~
This directory also exposes timings for a number of internal operations, with
separate files for average duration, average frequency, last occurrence and max
@@ -574,7 +597,8 @@ cache_read_races
trigger_gc
Writing to this file forces garbage collection to run.
-SYSFS - CACHE DEVICE:
+Sysfs - Cache device
+~~~~~~~~~~~~~~~~~~~~
Available at /sys/block/<cdev>/bcache
diff --git a/Documentation/bt8xxgpio.txt b/Documentation/bt8xxgpio.txt
index d8297e4ebd26..a845feb074de 100644
--- a/Documentation/bt8xxgpio.txt
+++ b/Documentation/bt8xxgpio.txt
@@ -1,12 +1,8 @@
-===============================================================
-== BT8XXGPIO driver ==
-== ==
-== A driver for a selfmade cheap BT8xx based PCI GPIO-card ==
-== ==
-== For advanced documentation, see ==
-== http://www.bu3sch.de/btgpio.php ==
-===============================================================
+===================================================================
+A driver for a selfmade cheap BT8xx based PCI GPIO-card (bt8xxgpio)
+===================================================================
+For advanced documentation, see http://www.bu3sch.de/btgpio.php
A generic digital 24-port PCI GPIO card can be built out of an ordinary
Brooktree bt848, bt849, bt878 or bt879 based analog TV tuner card. The
The bt8xx chip has 24 digital GPIO ports.
These ports are accessible via 24 pins on the SMD chip package.
-==============================================
-== How to physically access the GPIO pins ==
-==============================================
+How to physically access the GPIO pins
+======================================
There are several ways to access these pins. One might unsolder the whole chip
and put it on a custom PCI board, or one might only unsolder each individual
@@ -27,7 +22,7 @@ GPIO pin and solder that to some tiny wire. As the chip package really is tiny
there are some advanced soldering skills needed in any case.
The physical pinouts are drawn in the following ASCII art.
-The GPIO pins are marked with G00-G23
+The GPIO pins are marked with G00-G23::
G G G G G G G G G G G G G G G G G G
0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
diff --git a/Documentation/btmrvl.txt b/Documentation/btmrvl.txt
index 34916a46c099..ec57740ead0c 100644
--- a/Documentation/btmrvl.txt
+++ b/Documentation/btmrvl.txt
@@ -1,18 +1,16 @@
-=======================================================================
- README for btmrvl driver
-=======================================================================
-
+=============
+btmrvl driver
+=============
All commands are used via debugfs interface.
-=====================
-Set/get driver configurations:
+Set/get driver configurations
+=============================
Path: /debug/btmrvl/config/
-gpiogap=[n]
-hscfgcmd
- These commands are used to configure the host sleep parameters.
+gpiogap=[n], hscfgcmd
+ These commands are used to configure the host sleep parameters::
bit 8:0 -- Gap
bit 16:8 -- GPIO
@@ -23,7 +21,8 @@ hscfgcmd
where Gap is the gap in milliseconds between the wakeup signal and
the wakeup event, or 0xff for the special host sleep setting.
- Usage:
+ Usage::
+
# Use SDIO interface to wake up the host and set GAP to 0x80:
echo 0xff80 > /debug/btmrvl/config/gpiogap
echo 1 > /debug/btmrvl/config/hscfgcmd
@@ -32,15 +31,16 @@ hscfgcmd
echo 0x03ff > /debug/btmrvl/config/gpiogap
echo 1 > /debug/btmrvl/config/hscfgcmd
-psmode=[n]
-pscmd
+psmode=[n], pscmd
These commands are used to enable/disable auto sleep mode
- where the option is:
+ where the option is::
+
1 -- Enable auto sleep mode
0 -- Disable auto sleep mode
- Usage:
+ Usage::
+
# Enable auto sleep mode
echo 1 > /debug/btmrvl/config/psmode
echo 1 > /debug/btmrvl/config/pscmd
@@ -50,15 +50,16 @@ pscmd
echo 1 > /debug/btmrvl/config/pscmd
-hsmode=[n]
-hscmd
+hsmode=[n], hscmd
These commands are used to enable host sleep or wake up firmware
- where the option is:
+ where the option is::
+
1 -- Enable host sleep
0 -- Wake up firmware
- Usage:
+ Usage::
+
# Enable host sleep
echo 1 > /debug/btmrvl/config/hsmode
echo 1 > /debug/btmrvl/config/hscmd
@@ -68,12 +69,13 @@ hscmd
echo 1 > /debug/btmrvl/config/hscmd
-======================
-Get driver status:
+Get driver status
+=================
Path: /debug/btmrvl/status/
-Usage:
+Usage::
+
cat /debug/btmrvl/status/<args>
where the args are:
@@ -90,14 +92,17 @@ hsstate
txdnldrdy
This command displays the value of Tx download ready flag.
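For example, to read one of the flags listed above (here the host sleep
state)::

    cat /debug/btmrvl/status/hsstate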
-
-=====================
+Issuing a raw hci command
+=========================
Use hcitool to issue a raw hci command; refer to the hcitool manual.
- Usage: Hcitool cmd <ogf> <ocf> [Parameters]
+Usage::
+
+	hcitool cmd <ogf> <ocf> [Parameters]
+
+Interface Control Command::
- Interface Control Command
hcitool cmd 0x3f 0x5b 0xf5 0x01 0x00 --Enable All interface
hcitool cmd 0x3f 0x5b 0xf5 0x01 0x01 --Enable Wlan interface
hcitool cmd 0x3f 0x5b 0xf5 0x01 0x02 --Enable BT interface
@@ -105,13 +110,13 @@ Use hcitool to issue raw hci command, refer to hcitool manual
hcitool cmd 0x3f 0x5b 0xf5 0x00 0x01 --Disable Wlan interface
hcitool cmd 0x3f 0x5b 0xf5 0x00 0x02 --Disable BT interface
-=======================================================================
-
+SD8688 firmware
+===============
-SD8688 firmware:
+Images:
-/lib/firmware/sd8688_helper.bin
-/lib/firmware/sd8688.bin
+- /lib/firmware/sd8688_helper.bin
+- /lib/firmware/sd8688.bin
The images can be downloaded from:
diff --git a/Documentation/bus-virt-phys-mapping.txt b/Documentation/bus-virt-phys-mapping.txt
index 2bc55ff3b4d1..4bb07c2f3e7d 100644
--- a/Documentation/bus-virt-phys-mapping.txt
+++ b/Documentation/bus-virt-phys-mapping.txt
@@ -1,17 +1,27 @@
-[ NOTE: The virt_to_bus() and bus_to_virt() functions have been
+==========================================================
+How to access I/O mapped memory from within device drivers
+==========================================================
+
+:Author: Linus
+
+.. warning::
+
+ The virt_to_bus() and bus_to_virt() functions have been
superseded by the functionality provided by the PCI DMA interface
(see Documentation/DMA-API-HOWTO.txt). They continue
to be documented below for historical purposes, but new code
- must not use them. --davidm 00/12/12 ]
+ must not use them. --davidm 00/12/12
-[ This is a mail message in response to a query on IO mapping, thus the
- strange format for a "document" ]
+::
+
+ [ This is a mail message in response to a query on IO mapping, thus the
+ strange format for a "document" ]
The AHA-1542 is a bus-master device, and your patch makes the driver give the
controller the physical address of the buffers, which is correct on x86
(because all bus master devices see the physical memory mappings directly).
-However, on many setups, there are actually _three_ different ways of looking
+However, on many setups, there are actually **three** different ways of looking
at memory addresses, and in this case we actually want the third, the
so-called "bus address".
@@ -38,7 +48,7 @@ because the memory and the devices share the same address space, and that is
not necessarily true on other PCI/ISA setups.
Now, just as an example, on the PReP (PowerPC Reference Platform), the
-CPU sees a memory map something like this (this is from memory):
+CPU sees a memory map something like this (this is from memory)::
0-2 GB "real memory"
2 GB-3 GB "system IO" (inb/out and similar accesses on x86)
@@ -52,7 +62,7 @@ So when the CPU wants any bus master to write to physical memory 0, it
has to give the master address 0x80000000 as the memory address.
So, for example, depending on how the kernel is actually mapped on the
-PPC, you can end up with a setup like this:
+PPC, you can end up with a setup like this::
physical address: 0
virtual address: 0xC0000000
@@ -61,7 +71,7 @@ PPC, you can end up with a setup like this:
where all the addresses actually point to the same thing. It's just seen
through different translations..
-Similarly, on the Alpha, the normal translation is
+Similarly, on the Alpha, the normal translation is::
physical address: 0
virtual address: 0xfffffc0000000000
@@ -70,7 +80,7 @@ Similarly, on the Alpha, the normal translation is
(but there are also Alphas where the physical address and the bus address
are the same).
-Anyway, the way to look up all these translations, you do
+Anyway, to look up all these translations, you do::
#include <asm/io.h>
@@ -81,8 +91,8 @@ Anyway, the way to look up all these translations, you do
Now, when do you need these?
-You want the _virtual_ address when you are actually going to access that
-pointer from the kernel. So you can have something like this:
+You want the **virtual** address when you are actually going to access that
+pointer from the kernel. So you can have something like this::
/*
* this is the hardware "mailbox" we use to communicate with
@@ -104,7 +114,7 @@ pointer from the kernel. So you can have something like this:
...
on the other hand, you want the bus address when you have a buffer that
-you want to give to the controller:
+you want to give to the controller::
/* ask the controller to read the sense status into "sense_buffer" */
mbox.bufstart = virt_to_bus(&sense_buffer);
@@ -112,7 +122,7 @@ you want to give to the controller:
mbox.status = 0;
notify_controller(&mbox);
-And you generally _never_ want to use the physical address, because you can't
+And you generally **never** want to use the physical address, because you can't
use that from the CPU (the CPU only uses translated virtual addresses), and
you can't use it from the bus master.
@@ -124,8 +134,10 @@ be remapped as measured in units of pages, a.k.a. the pfn (the memory
management layer doesn't know about devices outside the CPU, so it
shouldn't need to know about "bus addresses" etc).
-NOTE NOTE NOTE! The above is only one part of the whole equation. The above
-only talks about "real memory", that is, CPU memory (RAM).
+.. note::
+
+ The above is only one part of the whole equation. The above
+ only talks about "real memory", that is, CPU memory (RAM).
There is a completely different type of memory too, and that's the "shared
memory" on the PCI or ISA bus. That's generally not RAM (although in the case
@@ -137,20 +149,22 @@ whatever, and there is only one way to access it: the readb/writeb and
related functions. You should never take the address of such memory, because
there is really nothing you can do with such an address: it's not
conceptually in the same memory space as "real memory" at all, so you cannot
-just dereference a pointer. (Sadly, on x86 it _is_ in the same memory space,
+just dereference a pointer. (Sadly, on x86 it **is** in the same memory space,
so on x86 it actually works to just dereference a pointer, but it's not
portable).
-For such memory, you can do things like
+For such memory, you can do things like:
+
+ - reading::
- - reading:
/*
* read first 32 bits from ISA memory at 0xC0000, aka
* C000:0000 in DOS terms
*/
unsigned int signature = isa_readl(0xC0000);
- - remapping and writing:
+ - remapping and writing::
+
/*
* remap framebuffer PCI memory area at 0xFC000000,
* size 1MB, so that we can access it: We can directly
@@ -165,7 +179,8 @@ For such memory, you can do things like
/* unmap when we unload the driver */
iounmap(baseptr);
- - copying and clearing:
+ - copying and clearing::
+
/* get the 6-byte Ethernet address at ISA address E000:0040 */
memcpy_fromio(kernel_buffer, 0xE0040, 6);
/* write a packet to the driver */
@@ -181,10 +196,10 @@ happy that your driver works ;)
Note that kernel versions 2.0.x (and earlier) mistakenly called the
ioremap() function "vremap()". ioremap() is the proper name, but I
didn't think straight when I wrote it originally. People who have to
-support both can do something like:
+support both can do something like::
/* support old naming silliness */
- #if LINUX_VERSION_CODE < 0x020100
+ #if LINUX_VERSION_CODE < 0x020100
#define ioremap vremap
#define iounmap vfree
#endif
@@ -196,13 +211,10 @@ And the above sounds worse than it really is. Most real drivers really
don't do anything that complex (or rather: the complexity is not so
much in the actual IO accesses as in error handling and timeouts etc).
It's generally not hard to fix drivers, and in many cases the code
-actually looks better afterwards:
+actually looks better afterwards::
unsigned long signature = *(unsigned int *) 0xC0000;
vs
unsigned long signature = readl(0xC0000);
I think the second version actually is more readable, no?
-
- Linus
-
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 3f9f808b5119..6eb9d3f090cd 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -1,7 +1,8 @@
- Cache and TLB Flushing
- Under Linux
+==================================
+Cache and TLB Flushing Under Linux
+==================================
- David S. Miller <davem@redhat.com>
+:Author: David S. Miller <davem@redhat.com>
This document describes the cache/tlb flushing interfaces called
by the Linux VM subsystem. It enumerates each interface,
@@ -28,7 +29,7 @@ Therefore when software page table changes occur, the kernel will
invoke one of the following flush methods _after_ the page table
changes occur:
-1) void flush_tlb_all(void)
+1) ``void flush_tlb_all(void)``
The most severe flush of all. After this interface runs,
any previous page table modification whatsoever will be
@@ -37,7 +38,7 @@ changes occur:
This is usually invoked when the kernel page tables are
changed, since such translations are "global" in nature.
-2) void flush_tlb_mm(struct mm_struct *mm)
+2) ``void flush_tlb_mm(struct mm_struct *mm)``
This interface flushes an entire user address space from
the TLB. After running, this interface must make sure that
@@ -49,8 +50,8 @@ changes occur:
page table operations such as what happens during
fork, and exec.
-3) void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+3) ``void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)``
Here we are flushing a specific range of (user) virtual
address translations from the TLB. After running, this
@@ -69,7 +70,7 @@ changes occur:
call flush_tlb_page (see below) for each entry which may be
modified.
-4) void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+4) ``void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)``
This time we need to remove the PAGE_SIZE sized translation
from the TLB. The 'vma' is the backing structure used by
@@ -87,8 +88,8 @@ changes occur:
This is used primarily during fault processing.
-5) void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+5) ``void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)``
At the end of every page fault, this routine is invoked to
tell the architecture specific code that a translation
@@ -100,7 +101,7 @@ changes occur:
translations for software managed TLB configurations.
The sparc64 port currently does this.
-6) void tlb_migrate_finish(struct mm_struct *mm)
+6) ``void tlb_migrate_finish(struct mm_struct *mm)``
This interface is called at the end of an explicit
process migration. This interface provides a hook
@@ -112,7 +113,7 @@ changes occur:
Next, we have the cache flushing interfaces. In general, when Linux
is changing an existing virtual-->physical mapping to a new value,
-the sequence will be in one of the following forms:
+the sequence will be in one of the following forms::
1) flush_cache_mm(mm);
change_all_page_tables_of(mm);
@@ -143,7 +144,7 @@ and have no dependency on translation information.
Here are the routines, one by one:
-1) void flush_cache_mm(struct mm_struct *mm)
+1) ``void flush_cache_mm(struct mm_struct *mm)``
This interface flushes an entire user address space from
the caches. That is, after running, there will be no cache
@@ -152,7 +153,7 @@ Here are the routines, one by one:
This interface is used to handle whole address space
page table operations such as what happens during exit and exec.
-2) void flush_cache_dup_mm(struct mm_struct *mm)
+2) ``void flush_cache_dup_mm(struct mm_struct *mm)``
This interface flushes an entire user address space from
the caches. That is, after running, there will be no cache
@@ -164,8 +165,8 @@ Here are the routines, one by one:
This option is separate from flush_cache_mm to allow some
optimizations for VIPT caches.
-3) void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+3) ``void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)``
Here we are flushing a specific range of (user) virtual
addresses from the cache. After running, there will be no
@@ -181,7 +182,7 @@ Here are the routines, one by one:
call flush_cache_page (see below) for each entry which may be
modified.
-4) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+4) ``void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)``
This time we need to remove a PAGE_SIZE sized range
from the cache. The 'vma' is the backing structure used by
@@ -202,7 +203,7 @@ Here are the routines, one by one:
This is used primarily during fault processing.
-5) void flush_cache_kmaps(void)
+5) ``void flush_cache_kmaps(void)``
This routine need only be implemented if the platform utilizes
highmem. It will be called right before all of the kmaps
@@ -214,8 +215,8 @@ Here are the routines, one by one:
This routine should be implemented in asm/highmem.h
-6) void flush_cache_vmap(unsigned long start, unsigned long end)
- void flush_cache_vunmap(unsigned long start, unsigned long end)
+6) ``void flush_cache_vmap(unsigned long start, unsigned long end)``
+ ``void flush_cache_vunmap(unsigned long start, unsigned long end)``
Here in these two interfaces we are flushing a specific range
of (kernel) virtual addresses from the cache. After running,
@@ -243,8 +244,10 @@ size). This setting will force the SYSv IPC layer to only allow user
processes to mmap shared memory at addresses which are a multiple of
this value.
-NOTE: This does not fix shared mmaps, check out the sparc64 port for
-one way to solve this (in particular SPARC_FLAG_MMAPSHARED).
+.. note::
+
+ This does not fix shared mmaps, check out the sparc64 port for
+ one way to solve this (in particular SPARC_FLAG_MMAPSHARED).
Next, you have to solve the D-cache aliasing issue for all
other cases. Please keep in mind the fact that, for a given page
@@ -255,8 +258,8 @@ physical page into its address space, by implication the D-cache
aliasing problem has the potential to exist since the kernel already
maps this page at its virtual address.
- void copy_user_page(void *to, void *from, unsigned long addr, struct page *page)
- void clear_user_page(void *to, unsigned long addr, struct page *page)
+ ``void copy_user_page(void *to, void *from, unsigned long addr, struct page *page)``
+ ``void clear_user_page(void *to, unsigned long addr, struct page *page)``
These two routines store data in user anonymous or COW
pages. They allow a port to efficiently avoid D-cache alias
@@ -276,14 +279,16 @@ maps this page at its virtual address.
If D-cache aliasing is not an issue, these two routines may
simply call memcpy/memset directly and do nothing more.
- void flush_dcache_page(struct page *page)
+ ``void flush_dcache_page(struct page *page)``
Any time the kernel writes to a page cache page, _OR_
the kernel is about to read from a page cache page and
user space shared/writable mappings of this page potentially
exist, this routine is called.
- NOTE: This routine need only be called for page cache pages
+ .. note::
+
+ This routine need only be called for page cache pages
which can potentially ever be mapped into the address
space of a user process. So for example, VFS layer code
handling vfs symlinks in the page cache need not call
@@ -322,18 +327,19 @@ maps this page at its virtual address.
made of this flag bit, and if set the flush is done and the flag
bit is cleared.
- IMPORTANT NOTE: It is often important, if you defer the flush,
+ .. important::
+
+ It is often important, if you defer the flush,
that the actual flush occurs on the same CPU
as did the cpu stores into the page to make it
dirty. Again, see sparc64 for examples of how
to deal with this.
- void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
- unsigned long user_vaddr,
- void *dst, void *src, int len)
- void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
- unsigned long user_vaddr,
- void *dst, void *src, int len)
+ ``void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)``
+ ``void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)``
+
When the kernel needs to copy arbitrary data in and out
of arbitrary user pages (f.e. for ptrace()) it will use
these two routines.
@@ -344,8 +350,9 @@ maps this page at its virtual address.
likely that you will need to flush the instruction cache
for copy_to_user_page().
- void flush_anon_page(struct vm_area_struct *vma, struct page *page,
- unsigned long vmaddr)
+ ``void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vmaddr)``
+
When the kernel needs to access the contents of an anonymous
page, it calls this function (currently only
get_user_pages()). Note: flush_dcache_page() deliberately
@@ -354,7 +361,8 @@ maps this page at its virtual address.
architectures). For incoherent architectures, it should flush
the cache of the page at vmaddr.
- void flush_kernel_dcache_page(struct page *page)
+ ``void flush_kernel_dcache_page(struct page *page)``
+
When the kernel needs to modify a user page it has obtained
with kmap, it calls this function after all modifications are
complete (but before kunmapping it) to bring the underlying
@@ -366,14 +374,16 @@ maps this page at its virtual address.
the kernel cache for page (using page_address(page)).
- void flush_icache_range(unsigned long start, unsigned long end)
+ ``void flush_icache_range(unsigned long start, unsigned long end)``
+
When the kernel stores into addresses that it will execute
out of (e.g. when loading modules), this function is called.
If the icache does not snoop stores then this routine will need
to flush it.
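
	For illustration, a hypothetical caller (the buffer name and
	length are invented here) pairs the stores with the flush::

	  /* copy freshly generated code, then make it visible to the I-cache */
	  memcpy(code_buf, code, code_len);
	  flush_icache_range((unsigned long)code_buf,
	                     (unsigned long)code_buf + code_len);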
- void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+ ``void flush_icache_page(struct vm_area_struct *vma, struct page *page)``
+
All the functionality of flush_icache_page can be implemented in
flush_dcache_page and update_mmu_cache. In the future, the hope
is to remove this interface completely.
@@ -387,7 +397,8 @@ the kernel trying to do I/O to vmap areas must manually manage
coherency. It must do this by flushing the vmap range before doing
I/O and invalidating it after the I/O returns.
- void flush_kernel_vmap_range(void *vaddr, int size)
+ ``void flush_kernel_vmap_range(void *vaddr, int size)``
+
flushes the kernel cache for a given virtual address range in
the vmap area. This is to make sure that any data the kernel
modified in the vmap range is made visible to the physical
@@ -395,7 +406,8 @@ I/O and invalidating it after the I/O returns.
Note that this API does *not* also flush the offset map alias
of the area.
- void invalidate_kernel_vmap_range(void *vaddr, int size) invalidates
+  ``void invalidate_kernel_vmap_range(void *vaddr, int size)`` invalidates
+
the cache for a given virtual address range in the vmap area
which prevents the processor from making the cache stale by
speculatively reading data while the I/O was occurring to the
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index e6101976e0f1..bde177103567 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -1,7 +1,9 @@
-
+================
Control Group v2
+================
-October, 2015 Tejun Heo <tj@kernel.org>
+:Date: October, 2015
+:Author: Tejun Heo <tj@kernel.org>
This is the authoritative documentation on the design, interface and
conventions of cgroup v2. It describes all userland-visible aspects
@@ -9,70 +11,72 @@ of cgroup including core and specific controller behaviors. All
future changes must be reflected in this document. Documentation for
v1 is available under Documentation/cgroup-v1/.
-CONTENTS
-
-1. Introduction
- 1-1. Terminology
- 1-2. What is cgroup?
-2. Basic Operations
- 2-1. Mounting
- 2-2. Organizing Processes
- 2-3. [Un]populated Notification
- 2-4. Controlling Controllers
- 2-4-1. Enabling and Disabling
- 2-4-2. Top-down Constraint
- 2-4-3. No Internal Process Constraint
- 2-5. Delegation
- 2-5-1. Model of Delegation
- 2-5-2. Delegation Containment
- 2-6. Guidelines
- 2-6-1. Organize Once and Control
- 2-6-2. Avoid Name Collisions
-3. Resource Distribution Models
- 3-1. Weights
- 3-2. Limits
- 3-3. Protections
- 3-4. Allocations
-4. Interface Files
- 4-1. Format
- 4-2. Conventions
- 4-3. Core Interface Files
-5. Controllers
- 5-1. CPU
- 5-1-1. CPU Interface Files
- 5-2. Memory
- 5-2-1. Memory Interface Files
- 5-2-2. Usage Guidelines
- 5-2-3. Memory Ownership
- 5-3. IO
- 5-3-1. IO Interface Files
- 5-3-2. Writeback
- 5-4. PID
- 5-4-1. PID Interface Files
- 5-5. RDMA
- 5-5-1. RDMA Interface Files
- 5-6. Misc
- 5-6-1. perf_event
-6. Namespace
- 6-1. Basics
- 6-2. The Root and Views
- 6-3. Migration and setns(2)
- 6-4. Interaction with Other Namespaces
-P. Information on Kernel Programming
- P-1. Filesystem Support for Writeback
-D. Deprecated v1 Core Features
-R. Issues with v1 and Rationales for v2
- R-1. Multiple Hierarchies
- R-2. Thread Granularity
- R-3. Competition Between Inner Nodes and Threads
- R-4. Other Interface Issues
- R-5. Controller Issues and Remedies
- R-5-1. Memory
-
-
-1. Introduction
-
-1-1. Terminology
+.. CONTENTS
+
+ 1. Introduction
+ 1-1. Terminology
+ 1-2. What is cgroup?
+ 2. Basic Operations
+ 2-1. Mounting
+ 2-2. Organizing Processes
+ 2-3. [Un]populated Notification
+ 2-4. Controlling Controllers
+ 2-4-1. Enabling and Disabling
+ 2-4-2. Top-down Constraint
+ 2-4-3. No Internal Process Constraint
+ 2-5. Delegation
+ 2-5-1. Model of Delegation
+ 2-5-2. Delegation Containment
+ 2-6. Guidelines
+ 2-6-1. Organize Once and Control
+ 2-6-2. Avoid Name Collisions
+ 3. Resource Distribution Models
+ 3-1. Weights
+ 3-2. Limits
+ 3-3. Protections
+ 3-4. Allocations
+ 4. Interface Files
+ 4-1. Format
+ 4-2. Conventions
+ 4-3. Core Interface Files
+ 5. Controllers
+ 5-1. CPU
+ 5-1-1. CPU Interface Files
+ 5-2. Memory
+ 5-2-1. Memory Interface Files
+ 5-2-2. Usage Guidelines
+ 5-2-3. Memory Ownership
+ 5-3. IO
+ 5-3-1. IO Interface Files
+ 5-3-2. Writeback
+ 5-4. PID
+ 5-4-1. PID Interface Files
+ 5-5. RDMA
+ 5-5-1. RDMA Interface Files
+ 5-6. Misc
+ 5-6-1. perf_event
+ 6. Namespace
+ 6-1. Basics
+ 6-2. The Root and Views
+ 6-3. Migration and setns(2)
+ 6-4. Interaction with Other Namespaces
+ P. Information on Kernel Programming
+ P-1. Filesystem Support for Writeback
+ D. Deprecated v1 Core Features
+ R. Issues with v1 and Rationales for v2
+ R-1. Multiple Hierarchies
+ R-2. Thread Granularity
+ R-3. Competition Between Inner Nodes and Threads
+ R-4. Other Interface Issues
+ R-5. Controller Issues and Remedies
+ R-5-1. Memory
+
+
+Introduction
+============
+
+Terminology
+-----------
"cgroup" stands for "control group" and is never capitalized. The
singular form is used to designate the whole feature and also as a
@@ -80,7 +84,8 @@ qualifier as in "cgroup controllers". When explicitly referring to
multiple individual control groups, the plural form "cgroups" is used.
-1-2. What is cgroup?
+What is cgroup?
+---------------
cgroup is a mechanism to organize processes hierarchically and
distribute system resources along the hierarchy in a controlled and
@@ -110,12 +115,14 @@ restrictions set closer to the root in the hierarchy can not be
overridden from further away.
-2. Basic Operations
+Basic Operations
+================
-2-1. Mounting
+Mounting
+--------
Unlike v1, cgroup v2 has only a single hierarchy.  The cgroup v2
-hierarchy can be mounted with the following mount command.
+hierarchy can be mounted with the following mount command::
# mount -t cgroup2 none $MOUNT_POINT
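
The same mount can be issued from C via mount(2); a minimal sketch
(the mount point path is an assumption)::

  #include <stdio.h>
  #include <sys/mount.h>

  if (mount("none", "/sys/fs/cgroup", "cgroup2", 0, NULL) < 0)
          perror("mount cgroup2");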
@@ -160,10 +167,11 @@ cgroup v2 currently supports the following mount options.
Delegation section for details.
-2-2. Organizing Processes
+Organizing Processes
+--------------------
Initially, only the root cgroup exists to which all processes belong.
-A child cgroup can be created by creating a sub-directory.
+A child cgroup can be created by creating a sub-directory::
# mkdir $CGROUP_NAME
@@ -190,28 +198,29 @@ moved to another cgroup.
A cgroup which doesn't have any children or live processes can be
destroyed by removing the directory. Note that a cgroup which doesn't
have any children and is associated only with zombie processes is
-considered empty and can be removed.
+considered empty and can be removed::
# rmdir $CGROUP_NAME
"/proc/$PID/cgroup" lists a process's cgroup membership. If legacy
cgroup is in use in the system, this file may contain multiple lines,
one for each hierarchy. The entry for cgroup v2 is always in the
-format "0::$PATH".
+format "0::$PATH"::
# cat /proc/842/cgroup
...
0::/test-cgroup/test-cgroup-nested
If the process becomes a zombie and the cgroup it was associated with
-is removed subsequently, " (deleted)" is appended to the path.
+is removed subsequently, " (deleted)" is appended to the path::
# cat /proc/842/cgroup
...
0::/test-cgroup/test-cgroup-nested (deleted)
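
Putting the above together, a small C sketch (paths assumed, error
handling trimmed) creates a child cgroup and migrates the calling
process into it by writing its PID to "cgroup.procs"::

  #include <stdio.h>
  #include <sys/stat.h>
  #include <unistd.h>

  int main(void)
  {
          FILE *f;

          /* a child cgroup is just a sub-directory */
          mkdir("/sys/fs/cgroup/test-cgroup", 0755);

          /* migrate ourselves by writing our PID to cgroup.procs */
          f = fopen("/sys/fs/cgroup/test-cgroup/cgroup.procs", "w");
          if (!f)
                  return 1;
          fprintf(f, "%d\n", getpid());
          return fclose(f) ? 1 : 0;
  }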
-2-3. [Un]populated Notification
+[Un]populated Notification
+--------------------------
Each non-root cgroup has a "cgroup.events" file which contains a
"populated" field indicating whether the cgroup's sub-hierarchy has
@@ -222,7 +231,7 @@ example, to start a clean-up operation after all processes of a given
sub-hierarchy have exited. The populated state updates and
notifications are recursive. Consider the following sub-hierarchy
where the numbers in the parentheses represent the numbers of processes
-in each cgroup.
+in each cgroup::
A(4) - B(0) - C(1)
\ D(0)
@@ -233,18 +242,20 @@ file modified events will be generated on the "cgroup.events" files of
both cgroups.
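
A watcher can block on these events with poll(2); a sketch, assuming
the file-modified event surfaces as POLLPRI as it does for other
kernfs notification files::

  #include <fcntl.h>
  #include <poll.h>

  int fd = open("/sys/fs/cgroup/test-cgroup/cgroup.events", O_RDONLY);
  struct pollfd pfd = { .fd = fd, .events = POLLPRI };

  poll(&pfd, 1, -1);      /* returns when e.g. "populated" flips */
  /* re-read the file from offset 0 to fetch the new values */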
-2-4. Controlling Controllers
+Controlling Controllers
+-----------------------
-2-4-1. Enabling and Disabling
+Enabling and Disabling
+~~~~~~~~~~~~~~~~~~~~~~
Each cgroup has a "cgroup.controllers" file which lists all
-controllers available for the cgroup to enable.
+controllers available for the cgroup to enable::
# cat cgroup.controllers
cpu io memory
No controller is enabled by default. Controllers can be enabled and
-disabled by writing to the "cgroup.subtree_control" file.
+disabled by writing to the "cgroup.subtree_control" file::
# echo "+cpu +memory -io" > cgroup.subtree_control
@@ -256,7 +267,7 @@ are specified, the last one is effective.
Enabling a controller in a cgroup indicates that the distribution of
the target resource across its immediate children will be controlled.
Consider the following sub-hierarchy. The enabled controllers are
-listed in parentheses.
+listed in parentheses::
A(cpu,memory) - B(memory) - C()
\ D()
@@ -276,7 +287,8 @@ controller interface files - anything which doesn't start with
"cgroup." are owned by the parent rather than the cgroup itself.
-2-4-2. Top-down Constraint
+Top-down Constraint
+~~~~~~~~~~~~~~~~~~~
Resources are distributed top-down and a cgroup can further distribute
a resource only if the resource has been distributed to it from the
@@ -287,7 +299,8 @@ the parent has the controller enabled and a controller can't be
disabled if one or more children have it enabled.
-2-4-3. No Internal Process Constraint
+No Internal Process Constraint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Non-root cgroups can only distribute resources to their children when
they don't have any processes of their own. In other words, only
@@ -314,9 +327,11 @@ children before enabling controllers in its "cgroup.subtree_control"
file.
-2-5. Delegation
+Delegation
+----------
-2-5-1. Model of Delegation
+Model of Delegation
+~~~~~~~~~~~~~~~~~~~
A cgroup can be delegated in two ways. First, to a less privileged
user by granting write access of the directory and its "cgroup.procs"
@@ -345,7 +360,8 @@ cgroups in or nesting depth of a delegated sub-hierarchy; however,
this may be limited explicitly in the future.
-2-5-2. Delegation Containment
+Delegation Containment
+~~~~~~~~~~~~~~~~~~~~~~
A delegated sub-hierarchy is contained in the sense that processes
can't be moved into or out of the sub-hierarchy by the delegatee.
@@ -366,7 +382,7 @@ in from or push out to outside the sub-hierarchy.
For an example, let's assume cgroups C0 and C1 have been delegated to
user U0 who created C00, C01 under C0 and C10 under C1 as follows and
-all processes under C0 and C1 belong to U0.
+all processes under C0 and C1 belong to U0::
~~~~~~~~~~~~~ - C0 - C00
~ cgroup ~ \ C01
@@ -386,9 +402,11 @@ namespace of the process which is attempting the migration. If either
is not reachable, the migration is rejected with -ENOENT.
-2-6. Guidelines
+Guidelines
+----------
-2-6-1. Organize Once and Control
+Organize Once and Control
+~~~~~~~~~~~~~~~~~~~~~~~~~
Migrating a process across cgroups is a relatively expensive operation
and stateful resources such as memory are not moved together with the
@@ -404,7 +422,8 @@ distribution can be made by changing controller configuration through
the interface files.
-2-6-2. Avoid Name Collisions
+Avoid Name Collisions
+~~~~~~~~~~~~~~~~~~~~~
Interface files for a cgroup and its children cgroups occupy the same
directory and it is possible to create children cgroups which collide
@@ -422,14 +441,16 @@ cgroup doesn't do anything to prevent name collisions and it's the
user's responsibility to avoid them.
-3. Resource Distribution Models
+Resource Distribution Models
+============================
cgroup controllers implement several resource distribution schemes
depending on the resource type and expected use cases. This section
describes major schemes in use along with their expected behaviors.
-3-1. Weights
+Weights
+-------
A parent's resource is distributed by adding up the weights of all
active children and giving each the fraction matching the ratio of its
@@ -450,7 +471,8 @@ process migrations.
and is an example of this type.
-3-2. Limits
+Limits
+------
A child can only consume up to the configured amount of the resource.
Limits can be over-committed - the sum of the limits of children can
@@ -466,7 +488,8 @@ process migrations.
on an IO device and is an example of this type.
-3-3. Protections
+Protections
+-----------
A cgroup is protected to be allocated up to the configured amount of
the resource if the usages of all its ancestors are under their
@@ -486,7 +509,8 @@ process migrations.
example of this type.
-3-4. Allocations
+Allocations
+-----------
A cgroup is exclusively allocated a certain amount of a finite
resource. Allocations can't be over-committed - the sum of the
@@ -505,12 +529,14 @@ may be rejected.
type.
-4. Interface Files
+Interface Files
+===============
-4-1. Format
+Format
+------
All interface files should be in one of the following formats whenever
-possible.
+possible::
New-line separated values
(when only one value can be written at once)
@@ -545,7 +571,8 @@ can be written at a time. For nested keyed files, the sub key pairs
may be specified in any order and not all pairs have to be specified.
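
As an illustration, a userspace sketch (a hypothetical helper, not
kernel code) that splits one nested-keyed line such as
"8:16 rbps=2097152 wbps=max" might look like::

  #include <stdio.h>
  #include <string.h>

  static void parse_nested_keyed(char *line)
  {
          char *tok = strtok(line, " \n");        /* leading "MAJ:MIN" key */

          printf("device %s\n", tok);
          while ((tok = strtok(NULL, " \n"))) {
                  char *val = strchr(tok, '=');

                  if (val)
                          *val++ = '\0';
                  printf("  %s -> %s\n", tok, val ? val : "");
          }
  }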
-4-2. Conventions
+Conventions
+-----------
- Settings for a single feature should be contained in a single file.
@@ -581,25 +608,25 @@ may be specified in any order and not all pairs have to be specified.
with "default" as the value must not appear when read.
For example, a setting which is keyed by major:minor device numbers
- with integer values may look like the following.
+ with integer values may look like the following::
# cat cgroup-example-interface-file
default 150
8:0 300
- The default value can be updated by
+ The default value can be updated by::
# echo 125 > cgroup-example-interface-file
- or
+ or::
# echo "default 125" > cgroup-example-interface-file
- An override can be set by
+ An override can be set by::
# echo "8:16 170" > cgroup-example-interface-file
- and cleared by
+ and cleared by::
# echo "8:0 default" > cgroup-example-interface-file
# cat cgroup-example-interface-file
@@ -612,12 +639,12 @@ may be specified in any order and not all pairs have to be specified.
generated on the file.
-4-3. Core Interface Files
+Core Interface Files
+--------------------
All cgroup core files are prefixed with "cgroup."
cgroup.procs
-
A read-write new-line separated values file which exists on
all cgroups.
@@ -643,7 +670,6 @@ All cgroup core files are prefixed with "cgroup."
should be granted along with the containing directory.
cgroup.controllers
-
A read-only space separated values file which exists on all
cgroups.
@@ -651,7 +677,6 @@ All cgroup core files are prefixed with "cgroup."
the cgroup. The controllers are not ordered.
cgroup.subtree_control
-
A read-write space separated values file which exists on all
cgroups. Starts out empty.
@@ -667,23 +692,25 @@ All cgroup core files are prefixed with "cgroup."
operations are specified, either all succeed or all fail.
cgroup.events
-
A read-only flat-keyed file which exists on non-root cgroups.
The following entries are defined. Unless specified
otherwise, a value change in this file generates a file
modified event.
populated
-
1 if the cgroup or its descendants contains any live
processes; otherwise, 0.
-5. Controllers
+Controllers
+===========
-5-1. CPU
+CPU
+---
-[NOTE: The interface for the cpu controller hasn't been merged yet]
+.. note::
+
+ The interface for the cpu controller hasn't been merged yet.
The "cpu" controllers regulates distribution of CPU cycles. This
controller implements weight and absolute bandwidth limit models for
@@ -691,36 +718,34 @@ normal scheduling policy and absolute bandwidth allocation model for
realtime scheduling policy.
-5-1-1. CPU Interface Files
+CPU Interface Files
+~~~~~~~~~~~~~~~~~~~
All time durations are in microseconds.
cpu.stat
-
A read-only flat-keyed file which exists on non-root cgroups.
- It reports the following six stats.
+ It reports the following six stats:
- usage_usec
- user_usec
- system_usec
- nr_periods
- nr_throttled
- throttled_usec
+ - usage_usec
+ - user_usec
+ - system_usec
+ - nr_periods
+ - nr_throttled
+ - throttled_usec
cpu.weight
-
A read-write single value file which exists on non-root
cgroups. The default is "100".
The weight in the range [1, 10000].
cpu.max
-
A read-write two value file which exists on non-root cgroups.
The default is "max 100000".
- The maximum bandwidth limit. It's in the following format.
+ The maximum bandwidth limit. It's in the following format::
$MAX $PERIOD
@@ -729,9 +754,10 @@ All time durations are in microseconds.
one number is written, $MAX is updated.
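
	Since both values are in microseconds, writing "50000 100000"
	grants the cgroup 50ms of CPU time per 100ms period, i.e. half
	of one CPU.  A sketch of such a write from C (path assumed,
	and note the interface above is not merged yet)::

	  #include <stdio.h>

	  FILE *f = fopen("/sys/fs/cgroup/test-cgroup/cpu.max", "w");

	  if (f) {
	          fprintf(f, "50000 100000\n"); /* $MAX $PERIOD: half a CPU */
	          fclose(f);
	  }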
cpu.rt.max
+ .. note::
- [NOTE: The semantics of this file is still under discussion and the
- interface hasn't been merged yet]
+ The semantics of this file is still under discussion and the
+	  interface hasn't been merged yet.
A read-write two value file which exists on all cgroups.
The default is "0 100000".
@@ -739,7 +765,7 @@ All time durations are in microseconds.
The maximum realtime runtime allocation. Over-committing
configurations are disallowed and process migrations are
rejected if not enough bandwidth is available. It's in the
- following format.
+ following format::
$MAX $PERIOD
@@ -748,7 +774,8 @@ All time durations are in microseconds.
updated.
-5-2. Memory
+Memory
+------
The "memory" controller regulates distribution of memory. Memory is
stateful and implements both limit and protection models. Due to the
@@ -770,14 +797,14 @@ following types of memory usages are tracked.
The above list may expand in the future for better coverage.
-5-2-1. Memory Interface Files
+Memory Interface Files
+~~~~~~~~~~~~~~~~~~~~~~
All memory amounts are in bytes. If a value which is not aligned to
PAGE_SIZE is written, the value may be rounded up to the closest
PAGE_SIZE multiple when read back.
memory.current
-
A read-only single value file which exists on non-root
cgroups.
@@ -785,7 +812,6 @@ PAGE_SIZE multiple when read back.
and its descendants.
memory.low
-
A read-write single value file which exists on non-root
cgroups. The default is "0".
@@ -798,7 +824,6 @@ PAGE_SIZE multiple when read back.
protection is discouraged.
memory.high
-
A read-write single value file which exists on non-root
cgroups. The default is "max".
@@ -811,7 +836,6 @@ PAGE_SIZE multiple when read back.
under extreme conditions the limit may be breached.
memory.max
-
A read-write single value file which exists on non-root
cgroups. The default is "max".
@@ -826,21 +850,18 @@ PAGE_SIZE multiple when read back.
utility is limited to providing the final safety net.
memory.events
-
A read-only flat-keyed file which exists on non-root cgroups.
The following entries are defined. Unless specified
otherwise, a value change in this file generates a file
modified event.
low
-
The number of times the cgroup is reclaimed due to
high memory pressure even though its usage is under
the low boundary. This usually indicates that the low
boundary is over-committed.
high
-
The number of times processes of the cgroup are
throttled and routed to perform direct memory reclaim
because the high memory boundary was exceeded. For a
@@ -849,13 +870,11 @@ PAGE_SIZE multiple when read back.
occurrences are expected.
max
-
The number of times the cgroup's memory usage was
about to go over the max boundary. If direct reclaim
fails to bring it down, the cgroup goes to OOM state.
oom
-
		The number of times the cgroup's memory usage
		reached the limit and allocation was about to fail.
@@ -864,16 +883,14 @@ PAGE_SIZE multiple when read back.
Failed allocation in its turn could be returned into
		userspace as -ENOMEM or silently ignored in cases like
- disk readahead. For now OOM in memory cgroup kills
+ disk readahead. For now OOM in memory cgroup kills
tasks iff shortage has happened inside page fault.
oom_kill
-
The number of processes belonging to this cgroup
killed by any kind of OOM killer.
memory.stat
-
A read-only flat-keyed file which exists on non-root cgroups.
This breaks down the cgroup's memory footprint into different
@@ -887,73 +904,55 @@ PAGE_SIZE multiple when read back.
fixed position; use the keys to look up specific values!
anon
-
Amount of memory used in anonymous mappings such as
brk(), sbrk(), and mmap(MAP_ANONYMOUS)
file
-
Amount of memory used to cache filesystem data,
including tmpfs and shared memory.
kernel_stack
-
Amount of memory allocated to kernel stacks.
slab
-
Amount of memory used for storing in-kernel data
structures.
sock
-
Amount of memory used in network transmission buffers
shmem
-
Amount of cached filesystem data that is swap-backed,
such as tmpfs, shm segments, shared anonymous mmap()s
file_mapped
-
Amount of cached filesystem data mapped with mmap()
file_dirty
-
Amount of cached filesystem data that was modified but
not yet written back to disk
file_writeback
-
Amount of cached filesystem data that was modified and
is currently being written back to disk
- inactive_anon
- active_anon
- inactive_file
- active_file
- unevictable
-
+ inactive_anon, active_anon, inactive_file, active_file, unevictable
Amount of memory, swap-backed and filesystem-backed,
on the internal memory management lists used by the
page reclaim algorithm
slab_reclaimable
-
Part of "slab" that might be reclaimed, such as
dentries and inodes.
slab_unreclaimable
-
Part of "slab" that cannot be reclaimed on memory
pressure.
pgfault
-
Total number of page faults incurred
pgmajfault
-
Number of major page faults incurred
workingset_refault
@@ -997,7 +996,6 @@ PAGE_SIZE multiple when read back.
Amount of reclaimed lazyfree pages
memory.swap.current
-
A read-only single value file which exists on non-root
cgroups.
@@ -1005,7 +1003,6 @@ PAGE_SIZE multiple when read back.
and its descendants.
memory.swap.max
-
A read-write single value file which exists on non-root
cgroups. The default is "max".
@@ -1013,7 +1010,8 @@ PAGE_SIZE multiple when read back.
	limit, anonymous memory of the cgroup will not be swapped out.
-5-2-2. Usage Guidelines
+Usage Guidelines
+~~~~~~~~~~~~~~~~
"memory.high" is the main mechanism to control memory usage.
Over-committing on high limit (sum of high limits > available memory)
@@ -1036,7 +1034,8 @@ memory; unfortunately, memory pressure monitoring mechanism isn't
implemented yet.
-5-2-3. Memory Ownership
+Memory Ownership
+~~~~~~~~~~~~~~~~
A memory area is charged to the cgroup which instantiated it and stays
charged to the cgroup until the area is released. Migrating a process
@@ -1054,7 +1053,8 @@ POSIX_FADV_DONTNEED to relinquish the ownership of memory areas
belonging to the affected files to ensure correct memory ownership.
-5-3. IO
+IO
+--
The "io" controller regulates the distribution of IO resources. This
controller implements both weight based and absolute bandwidth or IOPS
@@ -1063,28 +1063,29 @@ only if cfq-iosched is in use and neither scheme is available for
blk-mq devices.
-5-3-1. IO Interface Files
+IO Interface Files
+~~~~~~~~~~~~~~~~~~
io.stat
-
A read-only nested-keyed file which exists on non-root
cgroups.
Lines are keyed by $MAJ:$MIN device numbers and not ordered.
The following nested keys are defined.
+ ====== ===================
rbytes Bytes read
wbytes Bytes written
rios Number of read IOs
wios Number of write IOs
+ ====== ===================
- An example read output follows.
+	An example read output follows::
8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353
8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252
io.weight
-
A read-write flat-keyed file which exists on non-root cgroups.
The default is "default 100".
@@ -1098,14 +1099,13 @@ blk-mq devices.
$WEIGHT" or simply "$WEIGHT". Overrides can be set by writing
"$MAJ:$MIN $WEIGHT" and unset by writing "$MAJ:$MIN default".
- An example read output follows.
+ An example read output follows::
default 100
8:16 200
8:0 50
io.max
-
A read-write nested-keyed file which exists on non-root
cgroups.
@@ -1113,10 +1113,12 @@ blk-mq devices.
device numbers and not ordered. The following nested keys are
defined.
+ ===== ==================================
rbps Max read bytes per second
wbps Max write bytes per second
riops Max read IO operations per second
wiops Max write IO operations per second
+ ===== ==================================
When writing, any number of nested key-value pairs can be
specified in any order. "max" can be specified as the value
@@ -1126,24 +1128,25 @@ blk-mq devices.
BPS and IOPS are measured in each IO direction and IOs are
delayed if limit is reached. Temporary bursts are allowed.
- Setting read limit at 2M BPS and write at 120 IOPS for 8:16.
+ Setting read limit at 2M BPS and write at 120 IOPS for 8:16::
echo "8:16 rbps=2097152 wiops=120" > io.max
- Reading returns the following.
+ Reading returns the following::
8:16 rbps=2097152 wbps=max riops=max wiops=120
- Write IOPS limit can be removed by writing the following.
+ Write IOPS limit can be removed by writing the following::
echo "8:16 wiops=max" > io.max
- Reading now returns the following.
+ Reading now returns the following::
8:16 rbps=2097152 wbps=max riops=max wiops=max
-5-3-2. Writeback
+Writeback
+~~~~~~~~~
Page cache is dirtied through buffered writes and shared mmaps and
written asynchronously to the backing filesystem by the writeback
@@ -1191,22 +1194,19 @@ patterns.
The sysctl knobs which affect writeback behavior are applied to cgroup
writeback as follows.
- vm.dirty_background_ratio
- vm.dirty_ratio
-
+ vm.dirty_background_ratio, vm.dirty_ratio
These ratios apply the same to cgroup writeback with the
amount of available memory capped by limits imposed by the
memory controller and system-wide clean memory.
- vm.dirty_background_bytes
- vm.dirty_bytes
-
+ vm.dirty_background_bytes, vm.dirty_bytes
For cgroup writeback, this is calculated into ratio against
total available memory and applied the same way as
vm.dirty[_background]_ratio.
-5-4. PID
+PID
+---
The process number controller is used to allow a cgroup to stop any
new tasks from being fork()'d or clone()'d after a specified limit is
@@ -1221,17 +1221,16 @@ Note that PIDs used in this controller refer to TIDs, process IDs as
used by the kernel.
-5-4-1. PID Interface Files
+PID Interface Files
+~~~~~~~~~~~~~~~~~~~
pids.max
-
A read-write single value file which exists on non-root
cgroups. The default is "max".
Hard limit of number of processes.
pids.current
-
A read-only single value file which exists on all cgroups.
The number of processes currently in the cgroup and its
@@ -1246,12 +1245,14 @@ through fork() or clone(). These will return -EAGAIN if the creation
of a new process would cause a cgroup policy to be violated.
-5-5. RDMA
+RDMA
+----
The "rdma" controller regulates the distribution and accounting of
of RDMA resources.
-5-5-1. RDMA Interface Files
+RDMA Interface Files
+~~~~~~~~~~~~~~~~~~~~
rdma.max
	A read-write nested-keyed file that exists for all the cgroups
@@ -1264,10 +1265,12 @@ of RDMA resources.
The following nested keys are defined.
+ ========== =============================
hca_handle Maximum number of HCA Handles
hca_object Maximum number of HCA Objects
+ ========== =============================
- An example for mlx4 and ocrdma device follows.
+ An example for mlx4 and ocrdma device follows::
mlx4_0 hca_handle=2 hca_object=2000
ocrdma1 hca_handle=3 hca_object=max
@@ -1276,15 +1279,17 @@ of RDMA resources.
A read-only file that describes current resource usage.
	It exists for all the cgroups except root.
- An example for mlx4 and ocrdma device follows.
+ An example for mlx4 and ocrdma device follows::
mlx4_0 hca_handle=1 hca_object=20
ocrdma1 hca_handle=1 hca_object=23
-5-6. Misc
+Misc
+----
-5-6-1. perf_event
+perf_event
+~~~~~~~~~~
perf_event controller, if not mounted on a legacy hierarchy, is
automatically enabled on the v2 hierarchy so that perf events can
@@ -1292,9 +1297,11 @@ always be filtered by cgroup v2 path. The controller can still be
moved to a legacy hierarchy after v2 hierarchy is populated.
-6. Namespace
+Namespace
+=========
-6-1. Basics
+Basics
+------
cgroup namespace provides a mechanism to virtualize the view of the
"/proc/$PID/cgroup" file and cgroup mounts. The CLONE_NEWCGROUP clone
@@ -1308,7 +1315,7 @@ Without cgroup namespace, the "/proc/$PID/cgroup" file shows the
complete path of the cgroup of a process. In a container setup where
a set of cgroups and namespaces are intended to isolate processes the
"/proc/$PID/cgroup" file may leak potential system level information
-to the isolated processes. For Example:
+to the isolated processes. For example::
# cat /proc/self/cgroup
0::/batchjobs/container_id1
@@ -1316,14 +1323,14 @@ to the isolated processes. For Example:
The path '/batchjobs/container_id1' can be considered system data
that is undesirable to expose to the isolated processes.  cgroup namespace
can be used to restrict visibility of this path. For example, before
-creating a cgroup namespace, one would see:
+creating a cgroup namespace, one would see::
# ls -l /proc/self/ns/cgroup
lrwxrwxrwx 1 root root 0 2014-07-15 10:37 /proc/self/ns/cgroup -> cgroup:[4026531835]
# cat /proc/self/cgroup
0::/batchjobs/container_id1
-After unsharing a new namespace, the view changes.
+After unsharing a new namespace, the view changes::
# ls -l /proc/self/ns/cgroup
lrwxrwxrwx 1 root root 0 2014-07-15 10:35 /proc/self/ns/cgroup -> cgroup:[4026532183]
@@ -1341,7 +1348,8 @@ namespace is destroyed. The cgroupns root and the actual cgroups
remain.
-6-2. The Root and Views
+The Root and Views
+------------------
The 'cgroupns root' for a cgroup namespace is the cgroup in which the
process calling unshare(2) is running. For example, if a process in
@@ -1350,7 +1358,7 @@ process calling unshare(2) is running. For example, if a process in
init_cgroup_ns, this is the real root ('/') cgroup.
The cgroupns root cgroup does not change even if the namespace creator
-process later moves to a different cgroup.
+process later moves to a different cgroup::
# ~/unshare -c # unshare cgroupns in some cgroup
# cat /proc/self/cgroup
@@ -1364,7 +1372,7 @@ Each process gets its namespace-specific view of "/proc/$PID/cgroup"
Processes running inside the cgroup namespace will be able to see
cgroup paths (in /proc/self/cgroup) only inside their root cgroup.
-From within an unshared cgroupns:
+From within an unshared cgroupns::
# sleep 100000 &
[1] 7353
@@ -1373,7 +1381,7 @@ From within an unshared cgroupns:
0::/sub_cgrp_1
From the initial cgroup namespace, the real cgroup path will be
-visible:
+visible::
$ cat /proc/7353/cgroup
0::/batchjobs/container_id1/sub_cgrp_1
@@ -1381,7 +1389,7 @@ visible:
From a sibling cgroup namespace (that is, a namespace rooted at a
different cgroup), the cgroup path relative to its own cgroup
namespace root will be shown. For instance, if PID 7353's cgroup
-namespace root is at '/batchjobs/container_id2', then it will see
+namespace root is at '/batchjobs/container_id2', then it will see::
# cat /proc/7353/cgroup
0::/../container_id2/sub_cgrp_1
@@ -1390,13 +1398,14 @@ Note that the relative path always starts with '/' to indicate that
it's relative to the cgroup namespace root of the caller.
-6-3. Migration and setns(2)
+Migration and setns(2)
+----------------------
Processes inside a cgroup namespace can move into and out of the
namespace root if they have proper access to external cgroups. For
example, from inside a namespace with cgroupns root at
/batchjobs/container_id1, and assuming that the global hierarchy is
-still accessible inside cgroupns:
+still accessible inside cgroupns::
# cat /proc/7353/cgroup
0::/sub_cgrp_1
@@ -1418,10 +1427,11 @@ namespace. It is expected that someone moves the attaching
process under the target cgroup namespace root.
-6-4. Interaction with Other Namespaces
+Interaction with Other Namespaces
+---------------------------------
Namespace specific cgroup hierarchy can be mounted by a process
-running inside a non-init cgroup namespace.
+running inside a non-init cgroup namespace::
# mount -t cgroup2 none $MOUNT_POINT
@@ -1434,27 +1444,27 @@ the view of cgroup hierarchy by namespace-private cgroupfs mount
provides a properly isolated cgroup view inside the container.
-P. Information on Kernel Programming
+Information on Kernel Programming
+=================================
This section contains kernel programming information in the areas
where interacting with cgroup is necessary. cgroup core and
controllers are not covered.
-P-1. Filesystem Support for Writeback
+Filesystem Support for Writeback
+--------------------------------
A filesystem can support cgroup writeback by updating
address_space_operations->writepage[s]() to annotate bio's using the
following two functions.
wbc_init_bio(@wbc, @bio)
-
Should be called for each bio carrying writeback data and
associates the bio with the inode's owner cgroup. Can be
called anytime between bio allocation and submission.
wbc_account_io(@wbc, @page, @bytes)
-
Should be called for each data segment being written out.
While this function doesn't care exactly when it's called
during the writeback session, it's the easiest and most
@@ -1475,7 +1485,8 @@ cases by skipping wbc_init_bio() or using bio_associate_blkcg()
directly.
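
A rough sketch of an annotated writepage, with all filesystem
specific setup and the actual submission elided (the function name
and its bio plumbing are illustrative only)::

  static int foo_writepage(struct page *page, struct writeback_control *wbc)
  {
          struct bio *bio = bio_alloc(GFP_NOFS, 1);

          wbc_init_bio(wbc, bio);         /* tie bio to the inode's owner cgroup */
          bio_add_page(bio, page, PAGE_SIZE, 0);
          wbc_account_io(wbc, page, PAGE_SIZE);
          /* ... set up device, sector, end_io and submit the WRITE ... */
          return 0;
  }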
-D. Deprecated v1 Core Features
+Deprecated v1 Core Features
+===========================
- Multiple hierarchies including named ones are not supported.
@@ -1489,9 +1500,11 @@ D. Deprecated v1 Core Features
at the root instead.
-R. Issues with v1 and Rationales for v2
+Issues with v1 and Rationales for v2
+====================================
-R-1. Multiple Hierarchies
+Multiple Hierarchies
+--------------------
cgroup v1 allowed an arbitrary number of hierarchies and each
hierarchy could host any number of controllers. While this seemed to
@@ -1543,7 +1556,8 @@ how memory is distributed beyond a certain level while still wanting
to control how CPU cycles are distributed.
-R-2. Thread Granularity
+Thread Granularity
+------------------
cgroup v1 allowed threads of a process to belong to different cgroups.
This didn't make sense for some controllers and those controllers
@@ -1586,7 +1600,8 @@ misbehaving and poorly abstracted interfaces and kernel exposing and
locked into constructs inadvertently.
-R-3. Competition Between Inner Nodes and Threads
+Competition Between Inner Nodes and Threads
+-------------------------------------------
cgroup v1 allowed threads to be in any cgroups which created an
interesting problem where threads belonging to a parent cgroup and its
@@ -1605,7 +1620,7 @@ simply weren't available for threads.
The io controller implicitly created a hidden leaf node for each
cgroup to host the threads. The hidden leaf had its own copies of all
-the knobs with "leaf_" prefixed. While this allowed equivalent
+the knobs with ``leaf_`` prefixed. While this allowed equivalent
control over internal threads, it came with serious drawbacks.  It
always added an extra layer of nesting which wouldn't be necessary
otherwise, made the interface messy and significantly complicated the
@@ -1626,7 +1641,8 @@ This clearly is a problem which needs to be addressed from cgroup core
in a uniform way.
-R-4. Other Interface Issues
+Other Interface Issues
+----------------------
cgroup v1 grew without oversight and developed a large number of
idiosyncrasies and inconsistencies. One issue on the cgroup core side
@@ -1654,9 +1670,11 @@ cgroup v2 establishes common conventions where appropriate and updates
controllers so that they expose minimal and consistent interfaces.
-R-5. Controller Issues and Remedies
+Controller Issues and Remedies
+------------------------------
-R-5-1. Memory
+Memory
+~~~~~~
The original lower boundary, the soft limit, is defined as a limit
that is per default unset. As a result, the set of cgroups that
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
index 4a824d232472..d4628174b7c5 100644
--- a/Documentation/circular-buffers.txt
+++ b/Documentation/circular-buffers.txt
@@ -1,9 +1,9 @@
- ================
- CIRCULAR BUFFERS
- ================
+================
+Circular Buffers
+================
-By: David Howells <dhowells@redhat.com>
- Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+:Author: David Howells <dhowells@redhat.com>
+:Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Linux provides a number of features that can be used to implement circular
@@ -20,7 +20,7 @@ producer and just one consumer. It is possible to handle multiple producers by
serialising them, and to handle multiple consumers by serialising them.
-Contents:
+.. Contents:
(*) What is a circular buffer?
@@ -31,8 +31,8 @@ Contents:
- The consumer.
-==========================
-WHAT IS A CIRCULAR BUFFER?
+
+What is a circular buffer?
==========================
First of all, what is a circular buffer? A circular buffer is a buffer of
@@ -60,9 +60,7 @@ buffer, provided that neither index overtakes the other. The implementer must
be careful, however, as a region more than one unit in size may wrap the end of
the buffer and be broken into two segments.
-
-============================
-MEASURING POWER-OF-2 BUFFERS
+Measuring power-of-2 buffers
============================
Calculation of the occupancy or the remaining capacity of an arbitrarily sized
@@ -71,13 +69,13 @@ modulus (divide) instruction. However, if the buffer is of a power-of-2 size,
then a much quicker bitwise-AND instruction can be used instead.
Linux provides a set of macros for handling power-of-2 circular buffers. These
-can be made use of by:
+can be made use of by::
#include <linux/circ_buf.h>
The macros are:
- (*) Measure the remaining capacity of a buffer:
+ (#) Measure the remaining capacity of a buffer::
CIRC_SPACE(head_index, tail_index, buffer_size);
@@ -85,7 +83,7 @@ The macros are:
can be inserted.
- (*) Measure the maximum consecutive immediate space in a buffer:
+ (#) Measure the maximum consecutive immediate space in a buffer::
CIRC_SPACE_TO_END(head_index, tail_index, buffer_size);
@@ -94,14 +92,14 @@ The macros are:
beginning of the buffer.
- (*) Measure the occupancy of a buffer:
+ (#) Measure the occupancy of a buffer::
CIRC_CNT(head_index, tail_index, buffer_size);
This returns the number of items currently occupying a buffer[2].
- (*) Measure the non-wrapping occupancy of a buffer:
+ (#) Measure the non-wrapping occupancy of a buffer::
CIRC_CNT_TO_END(head_index, tail_index, buffer_size);
@@ -112,7 +110,7 @@ The macros are:
Each of these macros will nominally return a value between 0 and buffer_size-1,
however:
- [1] CIRC_SPACE*() are intended to be used in the producer. To the producer
+ (1) CIRC_SPACE*() are intended to be used in the producer. To the producer
they will return a lower bound as the producer controls the head index,
but the consumer may still be depleting the buffer on another CPU and
moving the tail index.
@@ -120,7 +118,7 @@ however:
To the consumer it will show an upper bound as the producer may be busy
depleting the space.
- [2] CIRC_CNT*() are intended to be used in the consumer. To the consumer they
+ (2) CIRC_CNT*() are intended to be used in the consumer. To the consumer they
will return a lower bound as the consumer controls the tail index, but the
producer may still be filling the buffer on another CPU and moving the
head index.
@@ -128,14 +126,12 @@ however:
To the producer it will show an upper bound as the consumer may be busy
emptying the buffer.
- [3] To a third party, the order in which the writes to the indices by the
+ (3) To a third party, the order in which the writes to the indices by the
producer and consumer become visible cannot be guaranteed as they are
independent and may be made on different CPUs - so the result in such a
situation will merely be a guess, and may even be negative.
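
To make the power-of-2 trick concrete: CIRC_CNT() in
<linux/circ_buf.h> is ((head) - (tail)) & ((size)-1), so with a
16-entry buffer, head at 3 and tail at 13 (the occupied region has
wrapped)::

  CIRC_CNT(3, 13, 16) == (3 - 13) & (16 - 1)
                      == (unsigned)-10 & 15
                      == 6    /* six occupied items, no division needed */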
-
-===========================================
-USING MEMORY BARRIERS WITH CIRCULAR BUFFERS
+Using memory barriers with circular buffers
===========================================
By using memory barriers in conjunction with circular buffers, you can avoid
@@ -152,10 +148,10 @@ time, and only one thing should be emptying a buffer at any one time, but the
two sides can operate simultaneously.
-THE PRODUCER
+The producer
------------
-The producer will look something like this:
+The producer will look something like this::
spin_lock(&producer_lock);
@@ -193,10 +189,10 @@ ordering between the read of the index indicating that the consumer has
vacated a given element and the write by the producer to that same element.
-THE CONSUMER
+The Consumer
------------
-The consumer will look something like this:
+The consumer will look something like this::
spin_lock(&consumer_lock);
@@ -235,8 +231,7 @@ prevents the compiler from tearing the store, and enforces ordering
against previous accesses.
-===============
-FURTHER READING
+Further reading
===============
See also Documentation/memory-barriers.txt for a description of Linux's memory
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 22f026aa2f34..be909ed45970 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -1,12 +1,16 @@
- The Common Clk Framework
- Mike Turquette <mturquette@ti.com>
+========================
+The Common Clk Framework
+========================
+
+:Author: Mike Turquette <mturquette@ti.com>
This document endeavours to explain the common clk framework details,
and how to port a platform over to this framework. It is not yet a
detailed explanation of the clock api in include/linux/clk.h, but
perhaps someday it will include that information.
- Part 1 - introduction and interface split
+Introduction and interface split
+================================
The common clk framework is an interface to control the clock nodes
available on various devices today. This may come in the form of clock
@@ -35,10 +39,11 @@ is defined in struct clk_foo and pointed to within struct clk_core. This
allows for easy navigation between the two discrete halves of the common
clock interface.
- Part 2 - common data structures and api
+Common data structures and api
+==============================
Below is the common struct clk_core definition from
-drivers/clk/clk.c, modified for brevity:
+drivers/clk/clk.c, modified for brevity::
struct clk_core {
const char *name;
@@ -59,7 +64,7 @@ struct clk. That api is documented in include/linux/clk.h.
Platforms and devices utilizing the common struct clk_core use the struct
clk_ops pointer in struct clk_core to perform the hardware-specific parts of
-the operations defined in clk-provider.h:
+the operations defined in clk-provider.h::
struct clk_ops {
int (*prepare)(struct clk_hw *hw);
@@ -95,19 +100,20 @@ the operations defined in clk-provider.h:
struct dentry *dentry);
};
- Part 3 - hardware clk implementations
+Hardware clk implementations
+============================
The strength of the common struct clk_core comes from its .ops and .hw pointers
which abstract the details of struct clk from the hardware-specific bits, and
vice versa. To illustrate consider the simple gateable clk implementation in
-drivers/clk/clk-gate.c:
+drivers/clk/clk-gate.c::
-struct clk_gate {
- struct clk_hw hw;
- void __iomem *reg;
- u8 bit_idx;
- ...
-};
+ struct clk_gate {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 bit_idx;
+ ...
+ };
struct clk_gate contains struct clk_hw hw as well as hardware-specific
knowledge about which register and bit controls this clk's gating.
@@ -115,7 +121,7 @@ Nothing about clock topology or accounting, such as enable_count or
notifier_count, is needed here. That is all handled by the common
framework code and struct clk_core.
-Let's walk through enabling this clk from driver code:
+Let's walk through enabling this clk from driver code::
struct clk *clk;
clk = clk_get(NULL, "my_gateable_clk");
@@ -123,70 +129,71 @@ Let's walk through enabling this clk from driver code:
clk_prepare(clk);
clk_enable(clk);
-The call graph for clk_enable is very simple:
+The call graph for clk_enable is very simple::
-clk_enable(clk);
- clk->ops->enable(clk->hw);
- [resolves to...]
- clk_gate_enable(hw);
- [resolves struct clk gate with to_clk_gate(hw)]
- clk_gate_set_bit(gate);
+ clk_enable(clk);
+ clk->ops->enable(clk->hw);
+ [resolves to...]
+ clk_gate_enable(hw);
+ [resolves struct clk gate with to_clk_gate(hw)]
+ clk_gate_set_bit(gate);
-And the definition of clk_gate_set_bit:
+And the definition of clk_gate_set_bit::
-static void clk_gate_set_bit(struct clk_gate *gate)
-{
- u32 reg;
+ static void clk_gate_set_bit(struct clk_gate *gate)
+ {
+ u32 reg;
- reg = __raw_readl(gate->reg);
- reg |= BIT(gate->bit_idx);
- writel(reg, gate->reg);
-}
+ reg = __raw_readl(gate->reg);
+ reg |= BIT(gate->bit_idx);
+ writel(reg, gate->reg);
+ }
-Note that to_clk_gate is defined as:
+Note that to_clk_gate is defined as::
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+ #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
This pattern of abstraction is used for every clock hardware
representation.
- Part 4 - supporting your own clk hardware
+Supporting your own clk hardware
+================================
When implementing support for a new type of clock it is only necessary to
-include the following header:
+include the following header::
-#include <linux/clk-provider.h>
+ #include <linux/clk-provider.h>
To construct a clk hardware structure for your platform you must define
-the following:
+the following::
-struct clk_foo {
- struct clk_hw hw;
- ... hardware specific data goes here ...
-};
+ struct clk_foo {
+ struct clk_hw hw;
+ ... hardware specific data goes here ...
+ };
To take advantage of your data you'll need to support valid operations
-for your clk:
+for your clk::
-struct clk_ops clk_foo_ops {
- .enable = &clk_foo_enable;
- .disable = &clk_foo_disable;
-};
+ struct clk_ops clk_foo_ops {
+ .enable = &clk_foo_enable;
+ .disable = &clk_foo_disable;
+ };
-Implement the above functions using container_of:
+Implement the above functions using container_of::
-#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)
+ #define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)
-int clk_foo_enable(struct clk_hw *hw)
-{
- struct clk_foo *foo;
+ int clk_foo_enable(struct clk_hw *hw)
+ {
+ struct clk_foo *foo;
- foo = to_clk_foo(hw);
+ foo = to_clk_foo(hw);
- ... perform magic on foo ...
+ ... perform magic on foo ...
- return 0;
-};
+ return 0;
+ };
Below is a matrix detailing which clk_ops are mandatory based upon the
hardware capabilities of that clock. A cell marked as "y" means
@@ -194,41 +201,56 @@ mandatory, a cell marked as "n" implies that either including that
callback is invalid or otherwise unnecessary. Empty cells are either
optional or must be evaluated on a case-by-case basis.
- clock hardware characteristics
- -----------------------------------------------------------
- | gate | change rate | single parent | multiplexer | root |
- |------|-------------|---------------|-------------|------|
-.prepare | | | | | |
-.unprepare | | | | | |
- | | | | | |
-.enable | y | | | | |
-.disable | y | | | | |
-.is_enabled | y | | | | |
- | | | | | |
-.recalc_rate | | y | | | |
-.round_rate | | y [1] | | | |
-.determine_rate | | y [1] | | | |
-.set_rate | | y | | | |
- | | | | | |
-.set_parent | | | n | y | n |
-.get_parent | | | n | y | n |
- | | | | | |
-.recalc_accuracy| | | | | |
- | | | | | |
-.init | | | | | |
- -----------------------------------------------------------
-[1] either one of round_rate or determine_rate is required.
+.. table:: clock hardware characteristics
+
+ +----------------+------+-------------+---------------+-------------+------+
+ | | gate | change rate | single parent | multiplexer | root |
+ +================+======+=============+===============+=============+======+
+ |.prepare | | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.unprepare | | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ +----------------+------+-------------+---------------+-------------+------+
+ |.enable | y | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.disable | y | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.is_enabled | y | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ +----------------+------+-------------+---------------+-------------+------+
+ |.recalc_rate | | y | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.round_rate | | y [1]_ | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.determine_rate | | y [1]_ | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.set_rate | | y | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ +----------------+------+-------------+---------------+-------------+------+
+ |.set_parent | | | n | y | n |
+ +----------------+------+-------------+---------------+-------------+------+
+ |.get_parent | | | n | y | n |
+ +----------------+------+-------------+---------------+-------------+------+
+ +----------------+------+-------------+---------------+-------------+------+
+ |.recalc_accuracy| | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+ +----------------+------+-------------+---------------+-------------+------+
+ |.init | | | | | |
+ +----------------+------+-------------+---------------+-------------+------+
+
+.. [1] either one of round_rate or determine_rate is required.
Finally, register your clock at run-time with a hardware-specific
registration function. This function simply populates struct clk_foo's
data and then passes the common struct clk parameters to the framework
-with a call to:
+with a call to::
-clk_register(...)
+ clk_register(...)
-See the basic clock types in drivers/clk/clk-*.c for examples.
+See the basic clock types in ``drivers/clk/clk-*.c`` for examples.
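
A sketch of such a registration helper for the hypothetical clk_foo
(all names invented for illustration)::

  struct clk *clk_register_foo(struct device *dev, const char *name)
  {
          struct clk_foo *foo;
          struct clk_init_data init;

          foo = kzalloc(sizeof(*foo), GFP_KERNEL);
          if (!foo)
                  return ERR_PTR(-ENOMEM);

          init.name = name;
          init.ops = &clk_foo_ops;
          init.parent_names = NULL;
          init.num_parents = 0;
          init.flags = 0;

          /* ... fill in the hardware specific data ... */

          foo->hw.init = &init;
          return clk_register(dev, &foo->hw);
  }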
- Part 5 - Disabling clock gating of unused clocks
+Disabling clock gating of unused clocks
+=======================================
Sometimes during development it can be useful to be able to bypass the
default disabling of unused clocks. For example, if drivers aren't enabling
@@ -239,7 +261,8 @@ are sorted out.
To bypass this disabling, include "clk_ignore_unused" in the bootargs to the
kernel.
- Part 6 - Locking
+Locking
+=======
The common clock framework uses two global locks, the prepare lock and the
enable lock.
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 9ec8488319dc..17b00914c6ab 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -114,7 +114,7 @@ The Slab Cache
User Space Memory Access
------------------------
-.. kernel-doc:: arch/x86/include/asm/uaccess_32.h
+.. kernel-doc:: arch/x86/include/asm/uaccess.h
:internal:
.. kernel-doc:: arch/x86/lib/usercopy_32.c
diff --git a/Documentation/cpu-load.txt b/Documentation/cpu-load.txt
index 287224e57cfc..2d01ce43d2a2 100644
--- a/Documentation/cpu-load.txt
+++ b/Documentation/cpu-load.txt
@@ -1,9 +1,10 @@
+========
CPU load
---------
+========
-Linux exports various bits of information via `/proc/stat' and
-`/proc/uptime' that userland tools, such as top(1), use to calculate
-the average time system spent in a particular state, for example:
+Linux exports various bits of information via ``/proc/stat`` and
+``/proc/uptime`` that userland tools, such as top(1), use to calculate
+the average time system spent in a particular state, for example::
$ iostat
Linux 2.6.18.3-exp (linmac) 02/20/2007
@@ -17,7 +18,7 @@ Here the system thinks that over the default sampling period the
system spent 10.01% of the time doing work in user space, 2.92% in the
kernel, and was overall 81.63% of the time idle.
-In most cases the `/proc/stat' information reflects the reality quite
+In most cases the ``/proc/stat`` information reflects the reality quite
closely; however, due to the nature of how/when the kernel collects
this data, sometimes it cannot be trusted at all.
@@ -33,78 +34,78 @@ Example
-------
If we imagine the system with one task that periodically burns cycles
-in the following manner:
+in the following manner::
- time line between two timer interrupts
-|--------------------------------------|
- ^ ^
- |_ something begins working |
- |_ something goes to sleep
- (only to be awaken quite soon)
+ time line between two timer interrupts
+ |--------------------------------------|
+ ^ ^
+ |_ something begins working |
+ |_ something goes to sleep
+                        (only to be awakened quite soon)
In the above situation the system will be 0% loaded according to the
-`/proc/stat' (since the timer interrupt will always happen when the
+``/proc/stat`` (since the timer interrupt will always happen when the
system is executing the idle handler), but in reality the load is
closer to 99%.
One can imagine many more situations where this behavior of the kernel
-will lead to quite erratic information inside `/proc/stat'.
-
-
-/* gcc -o hog smallhog.c */
-#include <time.h>
-#include <limits.h>
-#include <signal.h>
-#include <sys/time.h>
-#define HIST 10
-
-static volatile sig_atomic_t stop;
-
-static void sighandler (int signr)
-{
- (void) signr;
- stop = 1;
-}
-static unsigned long hog (unsigned long niters)
-{
- stop = 0;
- while (!stop && --niters);
- return niters;
-}
-int main (void)
-{
- int i;
- struct itimerval it = { .it_interval = { .tv_sec = 0, .tv_usec = 1 },
- .it_value = { .tv_sec = 0, .tv_usec = 1 } };
- sigset_t set;
- unsigned long v[HIST];
- double tmp = 0.0;
- unsigned long n;
- signal (SIGALRM, &sighandler);
- setitimer (ITIMER_REAL, &it, NULL);
-
- hog (ULONG_MAX);
- for (i = 0; i < HIST; ++i) v[i] = ULONG_MAX - hog (ULONG_MAX);
- for (i = 0; i < HIST; ++i) tmp += v[i];
- tmp /= HIST;
- n = tmp - (tmp / 3.0);
-
- sigemptyset (&set);
- sigaddset (&set, SIGALRM);
-
- for (;;) {
- hog (n);
- sigwait (&set, &i);
- }
- return 0;
-}
+will lead to quite erratic information inside ``/proc/stat``::
+
+
+ /* gcc -o hog smallhog.c */
+ #include <time.h>
+ #include <limits.h>
+ #include <signal.h>
+ #include <sys/time.h>
+ #define HIST 10
+
+ static volatile sig_atomic_t stop;
+
+ static void sighandler (int signr)
+ {
+ (void) signr;
+ stop = 1;
+ }
+ static unsigned long hog (unsigned long niters)
+ {
+ stop = 0;
+ while (!stop && --niters);
+ return niters;
+ }
+ int main (void)
+ {
+ int i;
+ struct itimerval it = { .it_interval = { .tv_sec = 0, .tv_usec = 1 },
+ .it_value = { .tv_sec = 0, .tv_usec = 1 } };
+ sigset_t set;
+ unsigned long v[HIST];
+ double tmp = 0.0;
+ unsigned long n;
+ signal (SIGALRM, &sighandler);
+ setitimer (ITIMER_REAL, &it, NULL);
+
+ hog (ULONG_MAX);
+ for (i = 0; i < HIST; ++i) v[i] = ULONG_MAX - hog (ULONG_MAX);
+ for (i = 0; i < HIST; ++i) tmp += v[i];
+ tmp /= HIST;
+ n = tmp - (tmp / 3.0);
+
+ sigemptyset (&set);
+ sigaddset (&set, SIGALRM);
+
+ for (;;) {
+ hog (n);
+ sigwait (&set, &i);
+ }
+ return 0;
+ }
References
----------
-http://lkml.org/lkml/2007/2/12/6
-Documentation/filesystems/proc.txt (1.8)
+- http://lkml.org/lkml/2007/2/12/6
+- Documentation/filesystems/proc.txt (1.8)
Thanks
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index 127c9d8c2174..c6e7e9196a8b 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -1,3 +1,6 @@
+===========================================
+How CPU topology info is exported via sysfs
+===========================================
Export CPU topology info via sysfs. Items (attributes) are similar
to /proc/cpuinfo output of some architectures:
@@ -75,24 +78,26 @@ CONFIG_SCHED_BOOK and CONFIG_DRAWER are currently only used on s390, where
they reflect the cpu and cache hierarchy.
For an architecture to support this feature, it must define some of
-these macros in include/asm-XXX/topology.h:
-#define topology_physical_package_id(cpu)
-#define topology_core_id(cpu)
-#define topology_book_id(cpu)
-#define topology_drawer_id(cpu)
-#define topology_sibling_cpumask(cpu)
-#define topology_core_cpumask(cpu)
-#define topology_book_cpumask(cpu)
-#define topology_drawer_cpumask(cpu)
-
-The type of **_id macros is int.
-The type of **_cpumask macros is (const) struct cpumask *. The latter
-correspond with appropriate **_siblings sysfs attributes (except for
+these macros in include/asm-XXX/topology.h::
+
+ #define topology_physical_package_id(cpu)
+ #define topology_core_id(cpu)
+ #define topology_book_id(cpu)
+ #define topology_drawer_id(cpu)
+ #define topology_sibling_cpumask(cpu)
+ #define topology_core_cpumask(cpu)
+ #define topology_book_cpumask(cpu)
+ #define topology_drawer_cpumask(cpu)
+
+The type of ``**_id`` macros is int.
+The type of ``**_cpumask`` macros is ``(const) struct cpumask *``. The latter
+correspond with appropriate ``**_siblings`` sysfs attributes (except for
topology_sibling_cpumask() which corresponds with thread_siblings).
To be consistent on all architectures, include/linux/topology.h
provides default definitions for any of the above macros that are
not defined by include/asm-XXX/topology.h:
+
1) physical_package_id: -1
2) core_id: 0
3) sibling_cpumask: just the given CPU
@@ -107,6 +112,7 @@ Additionally, CPU topology information is provided under
/sys/devices/system/cpu and includes these files. The internal
source for the output is in brackets ("[]").
+ =========== ==========================================================
kernel_max: the maximum CPU index allowed by the kernel configuration.
[NR_CPUS-1]
@@ -122,6 +128,7 @@ source for the output is in brackets ("[]").
present: CPUs that have been identified as being present in the
system. [cpu_present_mask]
+ =========== ==========================================================
The format for the above output is compatible with cpulist_parse()
[see <linux/cpumask.h>]. Some examples follow.
@@ -129,7 +136,7 @@ The format for the above output is compatible with cpulist_parse()
In this example, there are 64 CPUs in the system but cpus 32-63 exceed
the kernel max which is limited to 0..31 by the NR_CPUS config option
being 32. Note also that CPUs 2 and 4-31 are not online but could be
-brought online as they are both present and possible.
+brought online as they are both present and possible::
kernel_max: 31
offline: 2,4-31,32-63
@@ -140,7 +147,7 @@ brought online as they are both present and possible.
In this example, the NR_CPUS config option is 128, but the kernel was
started with possible_cpus=144. There are 4 CPUs in the system and cpu2
was manually taken offline (and is the only CPU that can be brought
-online.)
+online)::
kernel_max: 127
offline: 2,4-127,128-143
diff --git a/Documentation/crc32.txt b/Documentation/crc32.txt
index a08a7dd9d625..8a6860f33b4e 100644
--- a/Documentation/crc32.txt
+++ b/Documentation/crc32.txt
@@ -1,4 +1,6 @@
-A brief CRC tutorial.
+=================================
+Brief tutorial on CRC computation
+=================================
A CRC is a long-division remainder. You add the CRC to the message,
and the whole thing (message+CRC) is a multiple of the given
@@ -8,7 +10,8 @@ remainder computed on the message+CRC is 0. This latter approach
is used by a lot of hardware implementations, and is why so many
protocols put the end-of-frame flag after the CRC.
-It's actually the same long division you learned in school, except that
+It's actually the same long division you learned in school, except that:
+
- We're working in binary, so the digits are only 0 and 1, and
- When dividing polynomials, there are no carries. Rather than add and
subtract, we just xor. Thus, we tend to get a bit sloppy about
@@ -40,11 +43,12 @@ throw the quotient bit away, but subtract the appropriate multiple of
the polynomial from the remainder and we're back to where we started,
ready to process the next bit.
-A big-endian CRC written this way would be coded like:
-for (i = 0; i < input_bits; i++) {
- multiple = remainder & 0x80000000 ? CRCPOLY : 0;
- remainder = (remainder << 1 | next_input_bit()) ^ multiple;
-}
+A big-endian CRC written this way would be coded like::
+
+ for (i = 0; i < input_bits; i++) {
+ multiple = remainder & 0x80000000 ? CRCPOLY : 0;
+ remainder = (remainder << 1 | next_input_bit()) ^ multiple;
+ }
Notice how, to get at bit 32 of the shifted remainder, we look
at bit 31 of the remainder *before* shifting it.
@@ -54,25 +58,26 @@ the remainder don't actually affect any decision-making until
32 bits later. Thus, the first 32 cycles of this are pretty boring.
Also, to add the CRC to a message, we need a 32-bit-long hole for it at
the end, so we have to add 32 extra cycles shifting in zeros at the
-end of every message,
+end of every message.
These details lead to a standard trick: rearrange merging in the
next_input_bit() until the moment it's needed. Then the first 32 cycles
can be precomputed, and merging in the final 32 zero bits to make room
-for the CRC can be skipped entirely. This changes the code to:
+for the CRC can be skipped entirely. This changes the code to::
-for (i = 0; i < input_bits; i++) {
- remainder ^= next_input_bit() << 31;
- multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
- remainder = (remainder << 1) ^ multiple;
-}
+ for (i = 0; i < input_bits; i++) {
+ remainder ^= next_input_bit() << 31;
+ multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ remainder = (remainder << 1) ^ multiple;
+ }
-With this optimization, the little-endian code is particularly simple:
-for (i = 0; i < input_bits; i++) {
- remainder ^= next_input_bit();
- multiple = (remainder & 1) ? CRCPOLY : 0;
- remainder = (remainder >> 1) ^ multiple;
-}
+With this optimization, the little-endian code is particularly simple::
+
+ for (i = 0; i < input_bits; i++) {
+ remainder ^= next_input_bit();
+ multiple = (remainder & 1) ? CRCPOLY : 0;
+ remainder = (remainder >> 1) ^ multiple;
+ }
The most significant coefficient of the remainder polynomial is stored
in the least significant bit of the binary "remainder" variable.
@@ -81,23 +86,25 @@ be bit-reversed) and next_input_bit().
As long as next_input_bit is returning the bits in a sensible order, we don't
*have* to wait until the last possible moment to merge in additional bits.
-We can do it 8 bits at a time rather than 1 bit at a time:
-for (i = 0; i < input_bytes; i++) {
- remainder ^= next_input_byte() << 24;
- for (j = 0; j < 8; j++) {
- multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
- remainder = (remainder << 1) ^ multiple;
+We can do it 8 bits at a time rather than 1 bit at a time::
+
+ for (i = 0; i < input_bytes; i++) {
+ remainder ^= next_input_byte() << 24;
+ for (j = 0; j < 8; j++) {
+ multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ remainder = (remainder << 1) ^ multiple;
+ }
}
-}
-Or in little-endian:
-for (i = 0; i < input_bytes; i++) {
- remainder ^= next_input_byte();
- for (j = 0; j < 8; j++) {
- multiple = (remainder & 1) ? CRCPOLY : 0;
- remainder = (remainder >> 1) ^ multiple;
+Or in little-endian::
+
+ for (i = 0; i < input_bytes; i++) {
+ remainder ^= next_input_byte();
+ for (j = 0; j < 8; j++) {
+ multiple = (remainder & 1) ? CRCPOLY : 0;
+ remainder = (remainder >> 1) ^ multiple;
+ }
}
-}
If the input is a multiple of 32 bits, you can even XOR in a 32-bit
word at a time and increase the inner loop count to 32.
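+
+For instance, the big-endian loop might then become (a sketch, assuming
+a next_input_word() helper that returns the next 32 input bits in the
+proper order)::
+
+	for (i = 0; i < input_words; i++) {
+		remainder ^= next_input_word();
+		for (j = 0; j < 32; j++) {
+			multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+			remainder = (remainder << 1) ^ multiple;
+		}
+	}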
diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt
index b82b6ad48488..5969bf42562a 100644
--- a/Documentation/crypto/asymmetric-keys.txt
+++ b/Documentation/crypto/asymmetric-keys.txt
@@ -10,6 +10,7 @@ Contents:
- Signature verification.
- Asymmetric key subtypes.
- Instantiation data parsers.
+ - Keyring link restrictions.
========
@@ -318,7 +319,8 @@ KEYRING LINK RESTRICTIONS
=========================
Keyrings created from userspace using add_key can be configured to check the
-signature of the key being linked.
+signature of the key being linked. Keys without a valid signature are not
+allowed to link.
Several restriction methods are available:
@@ -327,9 +329,10 @@ Several restriction methods are available:
- Option string used with KEYCTL_RESTRICT_KEYRING:
- "builtin_trusted"
- The kernel builtin trusted keyring will be searched for the signing
- key. The ca_keys kernel parameter also affects which keys are used for
- signature verification.
+ The kernel builtin trusted keyring will be searched for the signing key.
+ If the builtin trusted keyring is not configured, all links will be
+ rejected. The ca_keys kernel parameter also affects which keys are used
+ for signature verification.
(2) Restrict using the kernel builtin and secondary trusted keyrings
@@ -337,8 +340,10 @@ Several restriction methods are available:
- "builtin_and_secondary_trusted"
The kernel builtin and secondary trusted keyrings will be searched for the
- signing key. The ca_keys kernel parameter also affects which keys are used
- for signature verification.
+ signing key. If the secondary trusted keyring is not configured, this
+ restriction will behave like the "builtin_trusted" option. The ca_keys
+ kernel parameter also affects which keys are used for signature
+ verification.
(3) Restrict using a separate key or keyring
@@ -346,7 +351,7 @@ Several restriction methods are available:
- "key_or_keyring:<key or keyring serial number>[:chain]"
Whenever a key link is requested, the link will only succeed if the key
- being linked is signed by one of the designated keys. This key may be
+ being linked is signed by one of the designated keys. This key may be
specified directly by providing a serial number for one asymmetric key, or
a group of keys may be searched for the signing key by providing the
serial number for a keyring.
@@ -354,7 +359,51 @@ Several restriction methods are available:
When the "chain" option is provided at the end of the string, the keys
within the destination keyring will also be searched for signing keys.
This allows for verification of certificate chains by adding each
- cert in order (starting closest to the root) to one keyring.
+ certificate in order (starting closest to the root) to a keyring. For
+ instance, one keyring can be populated with links to a set of root
+ certificates, with a separate, restricted keyring set up for each
+ certificate chain to be validated:
+
+ # Create and populate a keyring for root certificates
+ root_id=`keyctl add keyring root-certs "" @s`
+ keyctl padd asymmetric "" $root_id < root1.cert
+ keyctl padd asymmetric "" $root_id < root2.cert
+
+ # Create and restrict a keyring for the certificate chain
+ chain_id=`keyctl add keyring chain "" @s`
+ keyctl restrict_keyring $chain_id asymmetric key_or_keyring:$root_id:chain
+
+ # Attempt to add each certificate in the chain, starting with the
+ # certificate closest to the root.
+ keyctl padd asymmetric "" $chain_id < intermediateA.cert
+ keyctl padd asymmetric "" $chain_id < intermediateB.cert
+ keyctl padd asymmetric "" $chain_id < end-entity.cert
+
+ If the final end-entity certificate is successfully added to the "chain"
+ keyring, we can be certain that it has a valid signing chain going back to
+ one of the root certificates.
+
+ A single keyring can be used to verify a chain of signatures by
+ restricting the keyring after linking the root certificate:
+
+ # Create a keyring for the certificate chain and add the root
+ chain2_id=`keyctl add keyring chain2 "" @s`
+ keyctl padd asymmetric "" $chain2_id < root1.cert
+
+ # Restrict the keyring that already has root1.cert linked. The cert
+ # will remain linked by the keyring.
+ keyctl restrict_keyring $chain2_id asymmetric key_or_keyring:0:chain
+
+ # Attempt to add each certificate in the chain, starting with the
+ # certificate closest to the root.
+ keyctl padd asymmetric "" $chain2_id < intermediateA.cert
+ keyctl padd asymmetric "" $chain2_id < intermediateB.cert
+ keyctl padd asymmetric "" $chain2_id < end-entity.cert
+
+ If the final end-entity certificate is successfully added to the "chain2"
+ keyring, we can be certain that there is a valid signing chain going back
+ to the root certificate that was added before the keyring was restricted.
+
In all of these cases, if the signing key is found the signature of the key to
be linked will be verified using the signing key. The requested key is added
diff --git a/Documentation/dcdbas.txt b/Documentation/dcdbas.txt
index e1c52e2dc361..309cc57a7c1c 100644
--- a/Documentation/dcdbas.txt
+++ b/Documentation/dcdbas.txt
@@ -1,4 +1,9 @@
+===================================
+Dell Systems Management Base Driver
+===================================
+
Overview
+========
The Dell Systems Management Base Driver provides a sysfs interface for
systems management software such as Dell OpenManage to perform system
@@ -17,6 +22,7 @@ more information about the libsmbios project.
System Management Interrupt
+===========================
On some Dell systems, systems management software must access certain
management information via a system management interrupt (SMI). The SMI data
@@ -24,12 +30,12 @@ buffer must reside in 32-bit address space, and the physical address of the
buffer is required for the SMI. The driver maintains the memory required for
the SMI and provides a way for the application to generate the SMI.
The driver creates the following sysfs entries for systems management
-software to perform these system management interrupts:
+software to perform these system management interrupts::
-/sys/devices/platform/dcdbas/smi_data
-/sys/devices/platform/dcdbas/smi_data_buf_phys_addr
-/sys/devices/platform/dcdbas/smi_data_buf_size
-/sys/devices/platform/dcdbas/smi_request
+ /sys/devices/platform/dcdbas/smi_data
+ /sys/devices/platform/dcdbas/smi_data_buf_phys_addr
+ /sys/devices/platform/dcdbas/smi_data_buf_size
+ /sys/devices/platform/dcdbas/smi_request
Systems management software must perform the following steps to execute
a SMI using this driver:
@@ -43,6 +49,7 @@ a SMI using this driver:
Host Control Action
+===================
Dell OpenManage supports a host control feature that allows the administrator
to perform a power cycle or power off of the system after the OS has finished
@@ -69,12 +76,14 @@ power off host control action using this driver:
Host Control SMI Type
+=====================
The following table shows the value to write to host_control_smi_type to
perform a power cycle or power off host control action:
+=================== =====================
PowerEdge System Host Control SMI Type
----------------- ---------------------
+=================== =====================
300 HC_SMITYPE_TYPE1
1300 HC_SMITYPE_TYPE1
1400 HC_SMITYPE_TYPE2
@@ -87,5 +96,4 @@ PowerEdge System Host Control SMI Type
1655MC HC_SMITYPE_TYPE2
700 HC_SMITYPE_TYPE3
750 HC_SMITYPE_TYPE3
-
-
+=================== =====================
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt
index 9ff026d22b75..981ad4f89fd3 100644
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -1,6 +1,6 @@
-
- Using physical DMA provided by OHCI-1394 FireWire controllers for debugging
- ---------------------------------------------------------------------------
+===========================================================================
+Using physical DMA provided by OHCI-1394 FireWire controllers for debugging
+===========================================================================
Introduction
------------
@@ -91,10 +91,10 @@ Step-by-step instructions for using firescope with early OHCI initialization:
1) Verify that your hardware is supported:
Load the firewire-ohci module and check your kernel logs.
- You should see a line similar to
+ You should see a line similar to::
- firewire_ohci 0000:15:00.1: added OHCI v1.0 device as card 2, 4 IR + 4 IT
- ... contexts, quirks 0x11
+ firewire_ohci 0000:15:00.1: added OHCI v1.0 device as card 2, 4 IR + 4 IT
+ ... contexts, quirks 0x11
when loading the driver. If you have no supported controller, many PCI,
CardBus and even some Express cards which are fully compliant to OHCI-1394
@@ -113,9 +113,9 @@ Step-by-step instructions for using firescope with early OHCI initialization:
stable connection and has matching connectors (there are small 4-pin and
large 6-pin FireWire ports) will do.
- If an driver is running on both machines you should see a line like
+ If a driver is running on both machines you should see a line like::
- firewire_core 0000:15:00.1: created device fw1: GUID 00061b0020105917, S400
+ firewire_core 0000:15:00.1: created device fw1: GUID 00061b0020105917, S400
on both machines in the kernel log when the cable is plugged in
and connects the two machines.
@@ -123,7 +123,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
3) Test physical DMA using firescope:
On the debug host, make sure that /dev/fw* is accessible,
- then start firescope:
+ then start firescope::
$ firescope
Port 0 (/dev/fw1) opened, 2 nodes detected
@@ -163,7 +163,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
host loaded, reboot the debugged machine, booting the kernel which has
CONFIG_PROVIDE_OHCI1394_DMA_INIT enabled, with the option ohci1394_dma=early.
- Then, on the debugging host, run firescope, for example by using -A:
+ Then, on the debugging host, run firescope, for example by using -A::
firescope -A System.map-of-debug-target-kernel
@@ -178,6 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
Notes
-----
+
Documentation and specifications: http://halobates.de/firewire/
FireWire is a trademark of Apple Inc. - for more information please refer to:
diff --git a/Documentation/dell_rbu.txt b/Documentation/dell_rbu.txt
index d262e22bddec..0fdb6aa2704c 100644
--- a/Documentation/dell_rbu.txt
+++ b/Documentation/dell_rbu.txt
@@ -1,18 +1,30 @@
-Purpose:
-Demonstrate the usage of the new open sourced rbu (Remote BIOS Update) driver
+=============================================================
+Usage of the new open sourced rbu (Remote BIOS Update) driver
+=============================================================
+
+Purpose
+=======
+
+Document demonstrating the use of the Dell Remote BIOS Update driver
for updating BIOS images on Dell servers and desktops.
-Scope:
+Scope
+=====
+
This document discusses the functionality of the rbu driver only.
It does not cover the support needed from applications to enable the BIOS to
update itself with the image downloaded in to the memory.
-Overview:
+Overview
+========
+
This driver works with Dell OpenManage or Dell Update Packages for updating
the BIOS on Dell servers (starting from servers sold since 1999), desktops
and notebooks (starting from those sold in 2005).
+
Please go to http://support.dell.com register and you can find info on
OpenManage and Dell Update packages (DUP).
+
Libsmbios can also be used to update BIOS on Dell systems go to
http://linux.dell.com/libsmbios/ for details.
@@ -22,6 +34,7 @@ of physical pages having the BIOS image. In case of packetized the app
using the driver breaks the image in to packets of fixed sizes and the driver
would place each packet in contiguous physical memory. The driver also
maintains a link list of packets for reading them back.
+
If the dell_rbu driver is unloaded all the allocated memory is freed.
The rbu driver needs to have an application (as mentioned above)which will
@@ -30,28 +43,33 @@ inform the BIOS to enable the update in the next system reboot.
The user should not unload the rbu driver after downloading the BIOS image
or updating.
-The driver load creates the following directories under the /sys file system.
-/sys/class/firmware/dell_rbu/loading
-/sys/class/firmware/dell_rbu/data
-/sys/devices/platform/dell_rbu/image_type
-/sys/devices/platform/dell_rbu/data
-/sys/devices/platform/dell_rbu/packet_size
+The driver load creates the following directories under the /sys file system::
+
+ /sys/class/firmware/dell_rbu/loading
+ /sys/class/firmware/dell_rbu/data
+ /sys/devices/platform/dell_rbu/image_type
+ /sys/devices/platform/dell_rbu/data
+ /sys/devices/platform/dell_rbu/packet_size
The driver supports two types of update mechanism; monolithic and packetized.
These update mechanism depends upon the BIOS currently running on the system.
Most of the Dell systems support a monolithic update where the BIOS image is
copied to a single contiguous block of physical memory.
+
In case of packet mechanism the single memory can be broken in smaller chunks
of contiguous memory and the BIOS image is scattered in these packets.
By default the driver uses monolithic memory for the update type. This can be
changed to packets during the driver load time by specifying the load
-parameter image_type=packet. This can also be changed later as below
-echo packet > /sys/devices/platform/dell_rbu/image_type
+parameter image_type=packet. This can also be changed later as below::
+
+ echo packet > /sys/devices/platform/dell_rbu/image_type
In packet update mode the packet size has to be given before any packets can
-be downloaded. It is done as below
-echo XXXX > /sys/devices/platform/dell_rbu/packet_size
+be downloaded. It is done as below::
+
+ echo XXXX > /sys/devices/platform/dell_rbu/packet_size
+
In the packet update mechanism, the user needs to create a new file having
packets of data arranged back to back. It can be done as follows
The user creates packets header, gets the chunk of the BIOS image and
@@ -60,41 +78,54 @@ added together should match the specified packet_size. This makes one
packet, the user needs to create more such packets out of the entire BIOS
image file and then arrange all these packets back to back in to one single
file.
+
This file is then copied to /sys/class/firmware/dell_rbu/data.
Once this file gets to the driver, the driver extracts packet_size data from
the file and spreads it across the physical memory in contiguous packet_sized
space.
+
This method makes sure that all the packets get to the driver in a single operation.
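+
+A hypothetical sketch of laying out such a file (the packet header
+format is defined by the application and made up here; only the
+back-to-back, packet_size-sized layout matters, and for brevity the
+image size is assumed to be a multiple of the per-packet payload)::
+
+	size_t payload = packet_size - sizeof(hdr);
+	size_t off;
+
+	for (off = 0; off < image_size; off += payload) {
+		fwrite(&hdr, sizeof(hdr), 1, out);	/* packet header */
+		fwrite(image + off, payload, 1, out);	/* BIOS image chunk */
+	}
+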
In monolithic update the user simply get the BIOS image (.hdr file) and copies
to the data file as is without any change to the BIOS image itself.
Do the steps below to download the BIOS image.
+
1) echo 1 > /sys/class/firmware/dell_rbu/loading
2) cp bios_image.hdr /sys/class/firmware/dell_rbu/data
3) echo 0 > /sys/class/firmware/dell_rbu/loading
The /sys/class/firmware/dell_rbu/ entries will remain till the following is
done.
-echo -1 > /sys/class/firmware/dell_rbu/loading
+
+::
+
+ echo -1 > /sys/class/firmware/dell_rbu/loading
+
Until this step is completed the driver cannot be unloaded.
+
Also echoing either mono, packet or init in to image_type will free up the
memory allocated by the driver.
If a user by accident executes steps 1 and 3 above without executing step 2;
it will make the /sys/class/firmware/dell_rbu/ entries disappear.
-The entries can be recreated by doing the following
-echo init > /sys/devices/platform/dell_rbu/image_type
-NOTE: echoing init in image_type does not change it original value.
+
+The entries can be recreated by doing the following::
+
+ echo init > /sys/devices/platform/dell_rbu/image_type
+
+.. note:: echoing init in image_type does not change its original value.
Also the driver provides /sys/devices/platform/dell_rbu/data readonly file to
read back the image downloaded.
-NOTE:
-This driver requires a patch for firmware_class.c which has the modified
-request_firmware_nowait function.
-Also after updating the BIOS image a user mode application needs to execute
-code which sends the BIOS update request to the BIOS. So on the next reboot
-the BIOS knows about the new image downloaded and it updates itself.
-Also don't unload the rbu driver if the image has to be updated.
+.. note::
+
+ This driver requires a patch for firmware_class.c which has the modified
+ request_firmware_nowait function.
+
+ Also after updating the BIOS image a user mode application needs to execute
+ code which sends the BIOS update request to the BIOS. So on the next reboot
+ the BIOS knows about the new image downloaded and it updates itself.
+ Also don't unload the rbu driver if the image has to be updated.
diff --git a/Documentation/devicetree/bindings/clock/img,boston-clock.txt b/Documentation/devicetree/bindings/clock/img,boston-clock.txt
new file mode 100644
index 000000000000..7bc5e9ffb624
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/img,boston-clock.txt
@@ -0,0 +1,31 @@
+Binding for Imagination Technologies MIPS Boston clock sources.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+The device node must be a child node of the syscon node corresponding to the
+Boston system's platform registers.
+
+Required properties:
+- compatible : Should be "img,boston-clock".
+- #clock-cells : Should be set to 1.
+ Values available for clock consumers can be found in the header file:
+ <dt-bindings/clock/boston-clock.h>
+
+Example:
+
+ system-controller@17ffd000 {
+ compatible = "img,boston-platform-regs", "syscon";
+ reg = <0x17ffd000 0x1000>;
+
+ clk_boston: clock {
+ compatible = "img,boston-clock";
+ #clock-cells = <1>;
+ };
+ };
+
+ uart0: uart@17ffe000 {
+ /* ... */
+ clocks = <&clk_boston BOSTON_CLK_SYS>;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
index e593bbeb2115..504291d2e5c2 100644
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -3,10 +3,23 @@
Required properties:
- compatible : should be one of the following:
"altr,socfpga-denali-nand" - for Altera SOCFPGA
+ "socionext,uniphier-denali-nand-v5a" - for Socionext UniPhier (v5a)
+ "socionext,uniphier-denali-nand-v5b" - for Socionext UniPhier (v5b)
- reg : should contain registers location and length for data and reg.
- reg-names: Should contain the reg names "nand_data" and "denali_reg"
- interrupts : The interrupt number.
+Optional properties:
+ - nand-ecc-step-size: see nand.txt for details. If present, the value must be
+ 512 for "altr,socfpga-denali-nand"
+ 1024 for "socionext,uniphier-denali-nand-v5a"
+ 1024 for "socionext,uniphier-denali-nand-v5b"
+ - nand-ecc-strength: see nand.txt for details. Valid values are:
+ 8, 15 for "altr,socfpga-denali-nand"
+ 8, 16, 24 for "socionext,uniphier-denali-nand-v5a"
+ 8, 16 for "socionext,uniphier-denali-nand-v5b"
+ - nand-ecc-maximize: see nand.txt for details
+
The device tree may optionally contain sub-nodes describing partitions of the
address space. See partition.txt for more detail.
diff --git a/Documentation/devicetree/bindings/mtd/elm.txt b/Documentation/devicetree/bindings/mtd/elm.txt
index 8c1528c421d4..59ddc61c1076 100644
--- a/Documentation/devicetree/bindings/mtd/elm.txt
+++ b/Documentation/devicetree/bindings/mtd/elm.txt
@@ -1,7 +1,7 @@
Error location module
Required properties:
-- compatible: Must be "ti,am33xx-elm"
+- compatible: Must be "ti,am3352-elm"
- reg: physical base address and size of the registers map.
- interrupts: Interrupt number for the elm.
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index 174f68c26c1b..dd559045593d 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -5,7 +5,7 @@ the GPMC controller with a name of "nand".
All timing relevant properties as well as generic gpmc child properties are
explained in a separate documents - please refer to
-Documentation/devicetree/bindings/bus/ti-gpmc.txt
+Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
For NAND specific properties such as ECC modes or bus width, please refer to
Documentation/devicetree/bindings/mtd/nand.txt
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nor.txt b/Documentation/devicetree/bindings/mtd/gpmc-nor.txt
index 4828c17bb784..131d3a74d0bd 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nor.txt
@@ -5,7 +5,7 @@ child nodes of the GPMC controller with a name of "nor".
All timing relevant properties as well as generic GPMC child properties are
explained in a separate documents. Please refer to
-Documentation/devicetree/bindings/bus/ti-gpmc.txt
+Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Required properties:
- bank-width: Width of NOR flash in bytes. GPMC supports 8-bit and
@@ -28,7 +28,7 @@ Required properties:
Optional properties:
- gpmc,XXX Additional GPMC timings and settings parameters. See
- Documentation/devicetree/bindings/bus/ti-gpmc.txt
+ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Optional properties for partition table parsing:
- #address-cells: should be set to 1
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
index 5d8fa527c496..b6e8bfd024f4 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
@@ -5,7 +5,7 @@ the GPMC controller with a name of "onenand".
All timing relevant properties as well as generic gpmc child properties are
explained in a separate documents - please refer to
-Documentation/devicetree/bindings/bus/ti-gpmc.txt
+Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Required properties:
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
index d02acaff3c35..b289ef3c1b7e 100644
--- a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
@@ -4,7 +4,12 @@ The GPMI nand controller provides an interface to control the
NAND flash chips.
Required properties:
- - compatible : should be "fsl,<chip>-gpmi-nand"
+ - compatible : should be "fsl,<chip>-gpmi-nand", where <chip> can be:
+ * imx23
+ * imx28
+ * imx6q
+ * imx6sx
+ * imx7d
- reg : should contain registers location and length for gpmi and bch.
- reg-names: Should contain the reg names "gpmi-nand" and "bch"
- interrupts : BCH interrupt number.
@@ -13,6 +18,13 @@ Required properties:
and GPMI DMA channel ID.
Refer to dma.txt and fsl-mxs-dma.txt for details.
- dma-names: Must be "rx-tx".
+ - clocks : clocks phandle and clock specifier corresponding to each clock
+ specified in clock-names.
+ - clock-names : The "gpmi_io" clock is always required. Exactly which
+   clocks are required depends on the chip:
+ * imx23/imx28 : "gpmi_io"
+ * imx6q/sx : "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch"
+ * imx7d : "gpmi_io", "gpmi_bch_apb"
Optional properties:
- nand-on-flash-bbt: boolean to enable on flash bbt option if not
diff --git a/Documentation/devicetree/bindings/mtd/microchip,mchp23k256.txt b/Documentation/devicetree/bindings/mtd/microchip,mchp23k256.txt
new file mode 100644
index 000000000000..7328eb92a03c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/microchip,mchp23k256.txt
@@ -0,0 +1,18 @@
+* MTD SPI driver for Microchip 23K256 (and similar) serial SRAM
+
+Required properties:
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+ representing partitions.
+- compatible : Must be one of "microchip,mchp23k256" or "microchip,mchp23lcv1024"
+- reg : Chip-Select number
+- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
+
+Example:
+
+ spi-sram@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "microchip,mchp23k256";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index 069c192ed5c2..dbf9e054c11c 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -12,7 +12,8 @@ tree nodes.
The first part of NFC is NAND Controller Interface (NFI) HW.
Required NFI properties:
-- compatible: Should be "mediatek,mtxxxx-nfc".
+- compatible: Should be one of "mediatek,mt2701-nfc",
+ "mediatek,mt2712-nfc".
- reg: Base physical address and size of NFI.
- interrupts: Interrupts of NFI.
- clocks: NFI required clocks.
@@ -141,7 +142,7 @@ Example:
==============
Required BCH properties:
-- compatible: Should be "mediatek,mtxxxx-ecc".
+- compatible: Should be one of "mediatek,mt2701-ecc", "mediatek,mt2712-ecc".
- reg: Base physical address and size of ECC.
- interrupts: Interrupts of ECC.
- clocks: ECC required clocks.
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index b05601600083..133f3813719c 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -21,7 +21,7 @@ Optional NAND chip properties:
- nand-ecc-mode : String, operation mode of the NAND ecc mode.
Supported values are: "none", "soft", "hw", "hw_syndrome",
- "hw_oob_first".
+ "hw_oob_first", "on-die".
Deprecated values:
"soft_bch": use "soft" and nand-ecc-algo instead
- nand-ecc-algo: string, algorithm of NAND ECC.
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index 81a224da63be..36f3b769a626 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -1,29 +1,49 @@
-Representing flash partitions in devicetree
+Flash partitions in device tree
+===============================
-Partitions can be represented by sub-nodes of an mtd device. This can be used
+Flash devices can be partitioned into one or more functional ranges (e.g. "boot
+code", "nvram", "kernel").
+
+Different devices may be partitioned in different ways. Some may use a fixed
+flash layout set at production time. Some may use an on-flash table that
+describes the geometry and naming/purpose of each functional region. It is
+also possible to see these methods mixed.
+
+To assist system software in locating partitions, we allow describing which
+method is used for a given flash device. To describe the method there should be
+a subnode of the flash device that is named 'partitions'. It must have a
+'compatible' property, which is used to identify the method to use.
+
+We currently only document a binding for fixed layouts.
+
+
+Fixed Partitions
+================
+
+Partitions can be represented by sub-nodes of a flash device. This can be used
on platforms which have strong conventions about which portions of a flash are
used for what purposes, but which don't use an on-flash partition table such
as RedBoot.
-The partition table should be a subnode of the mtd node and should be named
+The partition table should be a subnode of the flash node and should be named
'partitions'. This node should have the following property:
- compatible : (required) must be "fixed-partitions"
Partitions are then defined in subnodes of the partitions node.
-For backwards compatibility partitions as direct subnodes of the mtd device are
+For backwards compatibility, partitions as direct subnodes of the flash device are
supported. This use is discouraged.
NOTE: also for backwards compatibility, direct subnodes that have a compatible
string are not considered partitions, as they may be used for other bindings.
#address-cells & #size-cells must both be present in the partitions subnode of the
-mtd device. There are two valid values for both:
+flash device. There are two valid values for both:
<1>: for partitions that require a single 32-bit cell to represent their
size/address (aka the value is below 4 GiB)
<2>: for partitions that require two 32-bit cells to represent their
size/address (aka the value is 4 GiB or greater).
Required properties:
-- reg : The partition's offset and size within the mtd bank.
+- reg : The partition's offset and size within the flash.
Optional properties:
- label : The label / name for this partition. If omitted, the label is taken
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt
index 2fefa1a44afd..ad16c1f481f7 100644
--- a/Documentation/devicetree/bindings/net/brcm,amac.txt
+++ b/Documentation/devicetree/bindings/net/brcm,amac.txt
@@ -11,6 +11,7 @@ Required properties:
- reg-names: Names of the registers.
"amac_base": Address and length of the GMAC registers
"idm_base": Address and length of the GMAC IDM registers
+ (required for NSP and Northstar2)
"nicpm_base": Address and length of the NIC Port Manager
registers (required for Northstar2)
- interrupts: Interrupt number
diff --git a/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt b/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt
deleted file mode 100644
index 022946caa7e2..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Broadcom GMAC Ethernet Controller Device Tree Bindings
--------------------------------------------------------------
-
-Required properties:
- - compatible: "brcm,bgmac-nsp"
- - reg: Address and length of the GMAC registers,
- Address and length of the GMAC IDM registers
- - reg-names: Names of the registers. Must have both "gmac_base" and
- "idm_base"
- - interrupts: Interrupt number
-
-Optional properties:
-- mac-address: See ethernet.txt file in the same directory
-
-Examples:
-
-gmac0: ethernet@18022000 {
- compatible = "brcm,bgmac-nsp";
- reg = <0x18022000 0x1000>,
- <0x18110000 0x1000>;
- reg-names = "gmac_base", "idm_base";
- interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
-};
diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt
index ace4a64b3695..f7da3d73ca1b 100644
--- a/Documentation/devicetree/bindings/net/gpmc-eth.txt
+++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt
@@ -9,7 +9,7 @@ the GPMC controller with an "ethernet" name.
All timing relevant properties as well as generic GPMC child properties are
explained in a separate documents. Please refer to
-Documentation/devicetree/bindings/bus/ti-gpmc.txt
+Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
For the properties relevant to the ethernet controller connected to the GPMC
refer to the binding documentation of the device. For example, the documentation
@@ -43,7 +43,7 @@ Required properties:
Optional properties:
- gpmc,XXX Additional GPMC timings and settings parameters. See
- Documentation/devicetree/bindings/bus/ti-gpmc.txt
+ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Example:
diff --git a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
index 07590bcdad15..7c04e22a5d6a 100644
--- a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
+++ b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
@@ -1,13 +1,20 @@
-* Broadcom Digital Timing Engine(DTE) based PTP clock driver
+* Broadcom Digital Timing Engine(DTE) based PTP clock
Required properties:
-- compatible: should be "brcm,ptp-dte"
+- compatible: should contain the core compatibility string
+ and the SoC compatibility string. The SoC
+ compatibility string is to handle SoC specific
+ hardware differences.
+ Core compatibility string:
+ "brcm,ptp-dte"
+ SoC compatibility strings:
+ "brcm,iproc-ptp-dte" - for iproc based SoC's
- reg: address and length of the DTE block's NCO registers
Example:
-ptp_dte: ptp_dte@180af650 {
- compatible = "brcm,ptp-dte";
+ptp: ptp-dte@180af650 {
+ compatible = "brcm,iproc-ptp-dte", "brcm,ptp-dte";
reg = <0x180af650 0x10>;
status = "okay";
};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-meson.txt b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
index 5376a4468cb6..5b07bebbf6f7 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-meson.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
@@ -2,7 +2,9 @@ Amlogic Meson PWM Controller
============================
Required properties:
-- compatible: Shall contain "amlogic,meson8b-pwm" or "amlogic,meson-gxbb-pwm".
+- compatible: Shall contain "amlogic,meson8b-pwm"
+ or "amlogic,meson-gxbb-pwm"
+ or "amlogic,meson-gxbb-ao-pwm"
- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-stm32.txt b/Documentation/devicetree/bindings/pwm/pwm-stm32.txt
index 6dd040363e5e..3e6d55018d7a 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-stm32.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-stm32.txt
@@ -24,7 +24,7 @@ Example:
compatible = "st,stm32-timers";
reg = <0x40010000 0x400>;
clocks = <&rcc 0 160>;
- clock-names = "clk_int";
+ clock-names = "int";
pwm {
compatible = "st,stm32-pwm";
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
index d6de64335022..7e94b802395d 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
@@ -8,6 +8,7 @@ Required Properties:
- "renesas,pwm-r8a7791": for R-Car M2-W
- "renesas,pwm-r8a7794": for R-Car E2
- "renesas,pwm-r8a7795": for R-Car H3
+ - "renesas,pwm-r8a7796": for R-Car M3-W
- reg: base address and length of the registers block for the PWM.
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
new file mode 100644
index 000000000000..1d990bcc0baf
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
@@ -0,0 +1,22 @@
+Broadcom STB wake-up Timer
+
+The Broadcom STB wake-up timer provides a 27MHz resolution timer, with the
+ability to wake up the system from low-power suspend/standby modes.
+
+Required properties:
+- compatible : should contain "brcm,brcmstb-waketimer"
+- reg : the register start and length for the WKTMR block
+- interrupts : The TIMER interrupt
+- interrupt-parent: The phandle to the Always-On (AON) Power Management (PM) L2
+ interrupt controller node
+- clocks : The phandle to the UPG fixed clock (27MHz domain)
+
+Example:
+
+waketimer@f0411580 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0xf0411580 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ clocks = <&upg_fixed>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/cortina,gemini.txt b/Documentation/devicetree/bindings/rtc/cortina,gemini.txt
deleted file mode 100644
index 4ce4e794ddbb..000000000000
--- a/Documentation/devicetree/bindings/rtc/cortina,gemini.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-* Cortina Systems Gemini RTC
-
-Gemini SoC real-time clock.
-
-Required properties:
-- compatible : Should be "cortina,gemini-rtc"
-
-Examples:
-
-rtc@45000000 {
- compatible = "cortina,gemini-rtc";
- reg = <0x45000000 0x100>;
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
new file mode 100644
index 000000000000..e3938f5e0b6c
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
@@ -0,0 +1,28 @@
+* Faraday Technology FTRTC010 Real Time Clock
+
+This RTC appears in, for example, the Storlink Gemini family of
+SoCs.
+
+Required properties:
+- compatible : Should be one of:
+ "faraday,ftrtc010"
+ "cortina,gemini-rtc", "faraday,ftrtc010"
+
+Optional properties:
+- clocks: when present should contain clock references to the
+ PCLK and EXTCLK clocks. Faraday calls the latter CLK1HZ and
+ says the clock should be 1 Hz, but implementers actually seem
+ to choose different clocks here, like Cortina who chose
+ 32768 Hz (a typical low-power clock).
+- clock-names: should name the clocks "PCLK" and "EXTCLK"
+ respectively.
+
+Examples:
+
+rtc@45000000 {
+ compatible = "cortina,gemini-rtc";
+ reg = <0x45000000 0x100>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&foo 0>, <&foo 1>;
+ clock-names = "PCLK", "EXTCLK";
+};
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
index e2837b951237..0a4c371a9b7a 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
@@ -1,17 +1,25 @@
STM32 Real Time Clock
Required properties:
-- compatible: "st,stm32-rtc".
+- compatible: can be either "st,stm32-rtc" or "st,stm32h7-rtc", depending on
+  whether the device is compatible with stm32(f4/f7) or stm32h7.
- reg: address range of rtc register set.
-- clocks: reference to the clock entry ck_rtc.
+- clocks: can use up to two clocks, depending on the part used:
+ - "rtc_ck": RTC clock source.
+ It is required on stm32(f4/f7) and stm32h7.
+ - "pclk": RTC APB interface clock.
+ It is not present on stm32(f4/f7).
+ It is required on stm32h7.
+- clock-names: must be "rtc_ck" and "pclk".
+ It is required only on stm32h7.
- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- st,syscfg: phandle for pwrcfg, mandatory to disable/enable backup domain
(RTC registers) write protection.
-Optional properties (to override default ck_rtc parent clock):
-- assigned-clocks: reference to the ck_rtc clock entry.
-- assigned-clock-parents: phandle of the new parent clock of ck_rtc.
+Optional properties (to override default rtc_ck parent clock):
+- assigned-clocks: reference to the rtc_ck clock entry.
+- assigned-clock-parents: phandle of the new parent clock of rtc_ck.
Example:
@@ -25,3 +33,17 @@ Example:
interrupts = <17 1>;
st,syscfg = <&pwrcfg>;
};
+
+ rtc: rtc@58004000 {
+ compatible = "st,stm32h7-rtc";
+ reg = <0x58004000 0x400>;
+ clocks = <&rcc RTCAPB_CK>, <&rcc RTC_CK>;
+ clock-names = "pclk", "rtc_ck";
+ assigned-clocks = <&rcc RTC_CK>;
+ assigned-clock-parents = <&rcc LSE_CK>;
+ interrupt-parent = <&exti>;
+ interrupts = <17 1>;
+ interrupt-names = "alarm";
+ st,syscfg = <&pwrcfg>;
+ status = "disabled";
+ };
diff --git a/Documentation/digsig.txt b/Documentation/digsig.txt
index 3f682889068b..f6a8902d3ef7 100644
--- a/Documentation/digsig.txt
+++ b/Documentation/digsig.txt
@@ -1,13 +1,20 @@
+==================================
Digital Signature Verification API
+==================================
-CONTENTS
+:Author: Dmitry Kasatkin
+:Date: 06.10.2011
-1. Introduction
-2. API
-3. User-space utilities
+.. CONTENTS
-1. Introduction
+ 1. Introduction
+ 2. API
+ 3. User-space utilities
+
+
+Introduction
+============
Digital signature verification API provides a method to verify digital signature.
Currently digital signatures are used by the IMA/EVM integrity protection subsystem.
@@ -17,25 +24,25 @@ GnuPG multi-precision integers (MPI) library. The kernel port provides
memory allocation errors handling, has been refactored according to kernel
coding style, and checkpatch.pl reported errors and warnings have been fixed.
-Public key and signature consist of header and MPIs.
-
-struct pubkey_hdr {
- uint8_t version; /* key format version */
- time_t timestamp; /* key made, always 0 for now */
- uint8_t algo;
- uint8_t nmpi;
- char mpi[0];
-} __packed;
-
-struct signature_hdr {
- uint8_t version; /* signature format version */
- time_t timestamp; /* signature made */
- uint8_t algo;
- uint8_t hash;
- uint8_t keyid[8];
- uint8_t nmpi;
- char mpi[0];
-} __packed;
+Public key and signature consist of header and MPIs::
+
+ struct pubkey_hdr {
+ uint8_t version; /* key format version */
+ time_t timestamp; /* key made, always 0 for now */
+ uint8_t algo;
+ uint8_t nmpi;
+ char mpi[0];
+ } __packed;
+
+ struct signature_hdr {
+ uint8_t version; /* signature format version */
+ time_t timestamp; /* signature made */
+ uint8_t algo;
+ uint8_t hash;
+ uint8_t keyid[8];
+ uint8_t nmpi;
+ char mpi[0];
+ } __packed;
keyid equals to SHA1[12-19] over the total key content.
Signature header is used as an input to generate a signature.
@@ -43,31 +50,33 @@ Such approach insures that key or signature header could not be changed.
It protects timestamp from been changed and can be used for rollback
protection.
-2. API
+API
+===
-API currently includes only 1 function:
+The API currently includes only one function::
digsig_verify() - digital signature verification with public key
-/**
- * digsig_verify() - digital signature verification with public key
- * @keyring: keyring to search key in
- * @sig: digital signature
- * @sigen: length of the signature
- * @data: data
- * @datalen: length of the data
- * @return: 0 on success, -EINVAL otherwise
- *
- * Verifies data integrity against digital signature.
- * Currently only RSA is supported.
- * Normally hash of the content is used as a data for this function.
- *
- */
-int digsig_verify(struct key *keyring, const char *sig, int siglen,
- const char *data, int datalen);
-
-3. User-space utilities
+ /**
+ * digsig_verify() - digital signature verification with public key
+ * @keyring: keyring to search key in
+ * @sig: digital signature
+ * @siglen: length of the signature
+ * @data: data
+ * @datalen: length of the data
+ * @return: 0 on success, -EINVAL otherwise
+ *
+ * Verifies data integrity against digital signature.
+ * Currently only RSA is supported.
+ * Normally hash of the content is used as a data for this function.
+ *
+ */
+ int digsig_verify(struct key *keyring, const char *sig, int siglen,
+ const char *data, int datalen);
+
+User-space utilities
+====================
The signing and key management utilities evm-utils provide functionality
to generate signatures, to load keys into the kernel keyring.
@@ -75,22 +84,18 @@ Keys can be in PEM or converted to the kernel format.
When the key is added to the kernel keyring, the keyid defines the name
of the key: 5D2B05FC633EE3E8 in the example bellow.
-Here is example output of the keyctl utility.
-
-$ keyctl show
-Session Keyring
- -3 --alswrv 0 0 keyring: _ses
-603976250 --alswrv 0 -1 \_ keyring: _uid.0
-817777377 --alswrv 0 0 \_ user: kmk
-891974900 --alswrv 0 0 \_ encrypted: evm-key
-170323636 --alswrv 0 0 \_ keyring: _module
-548221616 --alswrv 0 0 \_ keyring: _ima
-128198054 --alswrv 0 0 \_ keyring: _evm
-
-$ keyctl list 128198054
-1 key in keyring:
-620789745: --alswrv 0 0 user: 5D2B05FC633EE3E8
-
-
-Dmitry Kasatkin
-06.10.2011
+Here is example output of the keyctl utility::
+
+ $ keyctl show
+ Session Keyring
+ -3 --alswrv 0 0 keyring: _ses
+ 603976250 --alswrv 0 -1 \_ keyring: _uid.0
+ 817777377 --alswrv 0 0 \_ user: kmk
+ 891974900 --alswrv 0 0 \_ encrypted: evm-key
+ 170323636 --alswrv 0 0 \_ keyring: _module
+ 548221616 --alswrv 0 0 \_ keyring: _ima
+ 128198054 --alswrv 0 0 \_ keyring: _evm
+
+ $ keyctl list 128198054
+ 1 key in keyring:
+ 620789745: --alswrv 0 0 user: 5D2B05FC633EE3E8
diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst
index 472e7a664d13..ab82250c7727 100644
--- a/Documentation/driver-api/basics.rst
+++ b/Documentation/driver-api/basics.rst
@@ -106,9 +106,6 @@ Kernel utility functions
.. kernel-doc:: kernel/sys.c
:export:
-.. kernel-doc:: kernel/rcu/srcu.c
- :export:
-
.. kernel-doc:: kernel/rcu/tree.c
:export:
diff --git a/Documentation/efi-stub.txt b/Documentation/efi-stub.txt
index e15746988261..41df801f9a50 100644
--- a/Documentation/efi-stub.txt
+++ b/Documentation/efi-stub.txt
@@ -1,5 +1,6 @@
- The EFI Boot Stub
- ---------------------------
+=================
+The EFI Boot Stub
+=================
On the x86 and ARM platforms, a kernel zImage/bzImage can masquerade
as a PE/COFF image, thereby convincing EFI firmware loaders to load
@@ -25,7 +26,8 @@ a certain sense it *IS* the boot loader.
The EFI boot stub is enabled with the CONFIG_EFI_STUB kernel option.
-**** How to install bzImage.efi
+How to install bzImage.efi
+--------------------------
The bzImage located in arch/x86/boot/bzImage must be copied to the EFI
System Partition (ESP) and renamed with the extension ".efi". Without
@@ -37,14 +39,16 @@ may not need to be renamed. Similarly for arm64, arch/arm64/boot/Image
should be copied but not necessarily renamed.
-**** Passing kernel parameters from the EFI shell
+Passing kernel parameters from the EFI shell
+--------------------------------------------
-Arguments to the kernel can be passed after bzImage.efi, e.g.
+Arguments to the kernel can be passed after bzImage.efi, e.g.::
fs0:> bzImage.efi console=ttyS0 root=/dev/sda4
-**** The "initrd=" option
+The "initrd=" option
+--------------------
Like most boot loaders, the EFI stub allows the user to specify
multiple initrd files using the "initrd=" option. This is the only EFI
@@ -54,9 +58,9 @@ kernel when it boots.
The path to the initrd file must be an absolute path from the
beginning of the ESP, relative path names do not work. Also, the path
is an EFI-style path and directory elements must be separated with
-backslashes (\). For example, given the following directory layout,
+backslashes (\). For example, given the following directory layout::
-fs0:>
+ fs0:>
Kernels\
bzImage.efi
initrd-large.img
@@ -66,7 +70,7 @@ fs0:>
initrd-medium.img
to boot with the initrd-large.img file if the current working
-directory is fs0:\Kernels, the following command must be used,
+directory is fs0:\Kernels, the following command must be used::
fs0:\Kernels> bzImage.efi initrd=\Kernels\initrd-large.img
@@ -76,7 +80,8 @@ which understands relative paths, whereas the rest of the command line
is passed to bzImage.efi.
-**** The "dtb=" option
+The "dtb=" option
+-----------------
For the ARM and arm64 architectures, we also need to be able to provide a
device tree to the kernel. This is done with the "dtb=" command line option,
diff --git a/Documentation/eisa.txt b/Documentation/eisa.txt
index a55e4910924e..2806e5544e43 100644
--- a/Documentation/eisa.txt
+++ b/Documentation/eisa.txt
@@ -1,4 +1,8 @@
-EISA bus support (Marc Zyngier <maz@wild-wind.fr.eu.org>)
+================
+EISA bus support
+================
+
+:Author: Marc Zyngier <maz@wild-wind.fr.eu.org>
This document groups random notes about porting EISA drivers to the
new EISA/sysfs API.
@@ -14,168 +18,189 @@ detection code is generally also used to probe ISA cards). Moreover,
most EISA drivers are among the oldest Linux drivers so, as you can
imagine, some dust has settled here over the years.
-The EISA infrastructure is made up of three parts :
+The EISA infrastructure is made up of three parts:
- The bus code implements most of the generic code. It is shared
- among all the architectures that the EISA code runs on. It
- implements bus probing (detecting EISA cards available on the bus),
- allocates I/O resources, allows fancy naming through sysfs, and
- offers interfaces for driver to register.
+ among all the architectures that the EISA code runs on. It
+ implements bus probing (detecting EISA cards available on the bus),
+ allocates I/O resources, allows fancy naming through sysfs, and
+ offers interfaces for drivers to register.
- The bus root driver implements the glue between the bus hardware
- and the generic bus code. It is responsible for discovering the
- device implementing the bus, and setting it up to be latter probed
- by the bus code. This can go from something as simple as reserving
- an I/O region on x86, to the rather more complex, like the hppa
- EISA code. This is the part to implement in order to have EISA
- running on an "new" platform.
+ and the generic bus code. It is responsible for discovering the
+ device implementing the bus, and setting it up to be later probed
+ by the bus code. This can go from something as simple as reserving
+ an I/O region on x86, to the rather more complex, like the hppa
+ EISA code. This is the part to implement in order to have EISA
+ running on an "new" platform.
- The driver offers the bus a list of devices that it manages, and
- implements the necessary callbacks to probe and release devices
- whenever told to.
+ implements the necessary callbacks to probe and release devices
+ whenever told to.
Every function/structure below lives in <linux/eisa.h>, which depends
heavily on <linux/device.h>.
-** Bus root driver :
+Bus root driver
+===============
+
+::
-int eisa_root_register (struct eisa_root_device *root);
+ int eisa_root_register (struct eisa_root_device *root);
The eisa_root_register function is used to declare a device as the
root of an EISA bus. The eisa_root_device structure holds a reference
-to this device, as well as some parameters for probing purposes.
-
-struct eisa_root_device {
- struct device *dev; /* Pointer to bridge device */
- struct resource *res;
- unsigned long bus_base_addr;
- int slots; /* Max slot number */
- int force_probe; /* Probe even when no slot 0 */
- u64 dma_mask; /* from bridge device */
- int bus_nr; /* Set by eisa_root_register */
- struct resource eisa_root_res; /* ditto */
-};
-
-node : used for eisa_root_register internal purpose
-dev : pointer to the root device
-res : root device I/O resource
-bus_base_addr : slot 0 address on this bus
-slots : max slot number to probe
-force_probe : Probe even when slot 0 is empty (no EISA mainboard)
-dma_mask : Default DMA mask. Usually the bridge device dma_mask.
-bus_nr : unique bus id, set by eisa_root_register
-
-** Driver :
-
-int eisa_driver_register (struct eisa_driver *edrv);
-void eisa_driver_unregister (struct eisa_driver *edrv);
+to this device, as well as some parameters for probing purposes::
+
+ struct eisa_root_device {
+ struct device *dev; /* Pointer to bridge device */
+ struct resource *res;
+ unsigned long bus_base_addr;
+ int slots; /* Max slot number */
+ int force_probe; /* Probe even when no slot 0 */
+ u64 dma_mask; /* from bridge device */
+ int bus_nr; /* Set by eisa_root_register */
+ struct resource eisa_root_res; /* ditto */
+ };
+
+============= ======================================================
+dev pointer to the root device
+res root device I/O resource
+bus_base_addr slot 0 address on this bus
+slots max slot number to probe
+force_probe Probe even when slot 0 is empty (no EISA mainboard)
+dma_mask Default DMA mask. Usually the bridge device dma_mask.
+bus_nr unique bus id, set by eisa_root_register
+eisa_root_res root bus resource, set by eisa_root_register
+============= ======================================================
+
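+The bus root driver fills such a structure in and registers it with
+the bus code. A deliberately simplified sketch (eisa_root_register is
+the real API; the bridge device, names and values here are
+illustrative only)::
+
+    static struct eisa_root_device foo_eisa_root;
+
+    static int __init foo_eisa_root_init(void)
+    {
+        /* foo_bridge is a hypothetical device implementing the bus */
+        foo_eisa_root.dev = &foo_bridge.dev;
+        foo_eisa_root.res = &ioport_resource;
+        foo_eisa_root.bus_base_addr = 0;
+        foo_eisa_root.slots = EISA_MAX_SLOTS;
+        foo_eisa_root.dma_mask = 0xffffffff;
+
+        return eisa_root_register(&foo_eisa_root);
+    }
+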
+Driver
+======
+
+::
+
+ int eisa_driver_register (struct eisa_driver *edrv);
+ void eisa_driver_unregister (struct eisa_driver *edrv);
Clear enough?
-struct eisa_device_id {
- char sig[EISA_SIG_LEN];
- unsigned long driver_data;
-};
-
-struct eisa_driver {
- const struct eisa_device_id *id_table;
- struct device_driver driver;
-};
-
-id_table : an array of NULL terminated EISA id strings,
- followed by an empty string. Each string can
- optionally be paired with a driver-dependent value
- (driver_data).
-
-driver : a generic driver, such as described in
- Documentation/driver-model/driver.txt. Only .name,
- .probe and .remove members are mandatory.
-
-An example is the 3c59x driver :
-
-static struct eisa_device_id vortex_eisa_ids[] = {
- { "TCM5920", EISA_3C592_OFFSET },
- { "TCM5970", EISA_3C597_OFFSET },
- { "" }
-};
-
-static struct eisa_driver vortex_eisa_driver = {
- .id_table = vortex_eisa_ids,
- .driver = {
- .name = "3c59x",
- .probe = vortex_eisa_probe,
- .remove = vortex_eisa_remove
- }
-};
-
-** Device :
+::
+
+ struct eisa_device_id {
+ char sig[EISA_SIG_LEN];
+ unsigned long driver_data;
+ };
+
+ struct eisa_driver {
+ const struct eisa_device_id *id_table;
+ struct device_driver driver;
+ };
+
+=============== ====================================================
+id_table an array of EISA id strings, terminated by an
+ empty string. Each string can optionally be
+ paired with a driver-dependent value
+ (driver_data).
+
+driver a generic driver, such as described in
+ Documentation/driver-model/driver.txt. Only .name,
+ .probe and .remove members are mandatory.
+=============== ====================================================
+
+An example is the 3c59x driver::
+
+ static struct eisa_device_id vortex_eisa_ids[] = {
+ { "TCM5920", EISA_3C592_OFFSET },
+ { "TCM5970", EISA_3C597_OFFSET },
+ { "" }
+ };
+
+ static struct eisa_driver vortex_eisa_driver = {
+ .id_table = vortex_eisa_ids,
+ .driver = {
+ .name = "3c59x",
+ .probe = vortex_eisa_probe,
+ .remove = vortex_eisa_remove
+ }
+ };
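+
+Registration then happens in the module init code; a sketch (in the
+real 3c59x, the EISA registration is only one part of a larger init
+routine that also covers other buses)::
+
+    static int __init vortex_eisa_init(void)
+    {
+        return eisa_driver_register(&vortex_eisa_driver);
+    }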
+
+Device
+======
The sysfs framework calls .probe and .remove functions upon device
discovery and removal (note that the .remove function is only called
when the driver is built as a module).
Both functions are passed a pointer to a 'struct device', which is
-encapsulated in a 'struct eisa_device' described as follows :
-
-struct eisa_device {
- struct eisa_device_id id;
- int slot;
- int state;
- unsigned long base_addr;
- struct resource res[EISA_MAX_RESOURCES];
- u64 dma_mask;
- struct device dev; /* generic device */
-};
-
-id : EISA id, as read from device. id.driver_data is set from the
- matching driver EISA id.
-slot : slot number which the device was detected on
-state : set of flags indicating the state of the device. Current
- flags are EISA_CONFIG_ENABLED and EISA_CONFIG_FORCED.
-res : set of four 256 bytes I/O regions allocated to this device
-dma_mask: DMA mask set from the parent device.
-dev : generic device (see Documentation/driver-model/device.txt)
+encapsulated in a 'struct eisa_device' described as follows::
+
+ struct eisa_device {
+ struct eisa_device_id id;
+ int slot;
+ int state;
+ unsigned long base_addr;
+ struct resource res[EISA_MAX_RESOURCES];
+ u64 dma_mask;
+ struct device dev; /* generic device */
+ };
+
+======== ============================================================
+id EISA id, as read from device. id.driver_data is set from the
+ matching driver EISA id.
+slot slot number which the device was detected on
+state set of flags indicating the state of the device. Current
+ flags are EISA_CONFIG_ENABLED and EISA_CONFIG_FORCED.
+res set of four 256-byte I/O regions allocated to this device
+dma_mask DMA mask set from the parent device.
+dev generic device (see Documentation/driver-model/device.txt)
+======== ============================================================
You can get the 'struct eisa_device' from 'struct device' using the
'to_eisa_device' macro.
-** Misc stuff :
+Misc stuff
+==========
+
+::
-void eisa_set_drvdata (struct eisa_device *edev, void *data);
+ void eisa_set_drvdata (struct eisa_device *edev, void *data);
Stores data into the device's driver_data area.
-void *eisa_get_drvdata (struct eisa_device *edev):
+::
+
+ void *eisa_get_drvdata (struct eisa_device *edev);
Gets the pointer previously stored into the device's driver_data area.
-int eisa_get_region_index (void *addr);
+::
+
+ int eisa_get_region_index (void *addr);
Returns the region number (0 <= x < EISA_MAX_RESOURCES) of a given
address.
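+
+Taken together, a driver's probe and remove callbacks might use these
+helpers as follows (a minimal sketch with hypothetical names; error
+handling and includes trimmed, in the style of the 3c59x example
+above)::
+
+    struct foo_priv {
+        unsigned long io_base;
+        unsigned long variant;
+    };
+
+    static int foo_eisa_probe(struct device *dev)
+    {
+        struct eisa_device *edev = to_eisa_device(dev);
+        struct foo_priv *priv;
+
+        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        if (!priv)
+            return -ENOMEM;
+
+        /* driver_data comes from the matching eisa_device_id */
+        priv->variant = edev->id.driver_data;
+        priv->io_base = edev->base_addr;
+
+        eisa_set_drvdata(edev, priv);
+        return 0;
+    }
+
+    static int foo_eisa_remove(struct device *dev)
+    {
+        struct eisa_device *edev = to_eisa_device(dev);
+
+        kfree(eisa_get_drvdata(edev));
+        return 0;
+    }
+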
-** Kernel parameters :
+Kernel parameters
+=================
-eisa_bus.enable_dev :
+eisa_bus.enable_dev
+ A comma-separated list of slots to be enabled, even if the firmware
+ has set the card as disabled. The driver must be able to properly
+ initialize the device in such conditions.
-A comma-separated list of slots to be enabled, even if the firmware
-set the card as disabled. The driver must be able to properly
-initialize the device in such conditions.
+eisa_bus.disable_dev
+ A comma-separated list of slots to be disabled, even if the firmware
+ has set the card as enabled. The driver won't be called to handle this
+ device.
-eisa_bus.disable_dev :
+virtual_root.force_probe
+ Force the probing code to probe EISA slots even when it cannot find an
+ EISA compliant mainboard (nothing appears on slot 0). Defaults to 0
+ (don't force), and is set to 1 (force probing) when either
+ CONFIG_ALPHA_JENSEN or CONFIG_EISA_VLB_PRIMING is set.
-A comma-separated list of slots to be enabled, even if the firmware
-set the card as enabled. The driver won't be called to handle this
-device.
-
-virtual_root.force_probe :
-
-Force the probing code to probe EISA slots even when it cannot find an
-EISA compliant mainboard (nothing appears on slot 0). Defaults to 0
-(don't force), and set to 1 (force probing) when either
-CONFIG_ALPHA_JENSEN or CONFIG_EISA_VLB_PRIMING are set.
-
-** Random notes :
+Random notes
+============
Converting an EISA driver to the new API mostly involves *deleting*
code (since probing is now in the core EISA code). Unfortunately, most
@@ -194,9 +219,11 @@ routine.
For example, switching your favorite EISA SCSI card to the "hotplug"
model is "the right thing"(tm).
-** Thanks :
+Thanks
+======
+
+I'd like to thank the following people for their help:
-I'd like to thank the following people for their help :
- Xavier Benigni for lending me a wonderful Alpha Jensen,
- James Bottomley, Jeff Garzik for getting this stuff into the kernel,
- Andries Brouwer for contributing numerous EISA ids,
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 415484f3d59a..918972babcd8 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -134,6 +134,23 @@ use the boot option:
fail_futex=
mmc_core.fail_request=<interval>,<probability>,<space>,<times>
+o proc entries
+
+- /proc/<pid>/fail-nth:
+- /proc/self/task/<tid>/fail-nth:
+
+ Writing an integer N to this file makes the N-th call in the task fail.
+ Reading from this file returns an integer value. A value of '0' indicates
+ that the fault set up with a previous write to this file was injected.
+ A positive integer N indicates that the fault has not yet been injected.
+ Note that this file enables all types of faults (slab, futex, etc.).
+ This setting takes precedence over all other generic debugfs settings
+ like probability, interval, times, etc., but per-capability settings
+ (e.g. fail_futex/ignore-private) take precedence over it.
+
+ This feature is intended for systematic testing of faults in a single
+ system call. See an example below.
+
How to add new fault injection capability
-----------------------------------------
@@ -278,3 +295,65 @@ allocation failure.
# env FAILCMD_TYPE=fail_page_alloc \
./tools/testing/fault-injection/failcmd.sh --times=100 \
-- make -C tools/testing/selftests/ run_tests
+
+Systematic faults using fail-nth
+---------------------------------
+
+The following code systematically faults the 1st, 2nd, 3rd, and so on
+fault-injection capability encountered in the socketpair() system call.
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+
+int main()
+{
+ int i, err, res, fail_nth, fds[2];
+ char buf[128];
+
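+ /* also fail slab allocations that are allowed to sleep (GFP_WAIT) */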
+ system("echo N > /sys/kernel/debug/failslab/ignore-gfp-wait");
+ sprintf(buf, "/proc/self/task/%ld/fail-nth", syscall(SYS_gettid));
+ fail_nth = open(buf, O_RDWR);
+ for (i = 1;; i++) {
+ sprintf(buf, "%d", i);
+ write(fail_nth, buf, strlen(buf));
+ res = socketpair(AF_LOCAL, SOCK_STREAM, 0, fds);
+ err = errno;
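+ /* fail-nth reads back 0 once the fault has been injected */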
+ pread(fail_nth, buf, sizeof(buf), 0);
+ if (res == 0) {
+ close(fds[0]);
+ close(fds[1]);
+ }
+ printf("%d-th fault %c: res=%d/%d\n", i, atoi(buf) ? 'N' : 'Y',
+ res, err);
+ if (atoi(buf))
+ break;
+ }
+ return 0;
+}
+
+An example output:
+
+1-th fault Y: res=-1/23
+2-th fault Y: res=-1/23
+3-th fault Y: res=-1/12
+4-th fault Y: res=-1/12
+5-th fault Y: res=-1/23
+6-th fault Y: res=-1/23
+7-th fault Y: res=-1/23
+8-th fault Y: res=-1/12
+9-th fault Y: res=-1/12
+10-th fault Y: res=-1/12
+11-th fault Y: res=-1/12
+12-th fault Y: res=-1/12
+13-th fault Y: res=-1/12
+14-th fault Y: res=-1/12
+15-th fault Y: res=-1/12
+16-th fault N: res=0/12
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 4cddbce85ac9..adba21b5ada7 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1786,12 +1786,16 @@ pair provide additional information particular to the objects they represent.
pos: 0
flags: 02
mnt_id: 9
- tfd: 5 events: 1d data: ffffffffffffffff
+ tfd: 5 events: 1d data: ffffffffffffffff pos:0 ino:61af sdev:7
where 'tfd' is a target file descriptor number in decimal form,
'events' is events mask being watched and the 'data' is data
associated with a target [see epoll(7) for more details].
+ The 'pos' is the current offset of the target file in decimal form
+ [see lseek(2)]; 'ino' and 'sdev' are the inode and device numbers
+ where the target file resides, both in hex format.
+
Fsnotify files
~~~~~~~~~~~~~~
For inotify files the format is the following
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 48c9faa73a76..73e7d91f03dc 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -1225,12 +1225,6 @@ The underlying reason for the above rules is to make sure, that a
mount can be accurately replicated (e.g. umounting and mounting again)
based on the information found in /proc/mounts.
-A simple method of saving options at mount/remount time and showing
-them is provided with the save_mount_options() and
-generic_show_options() helper functions. Please note, that using
-these may have drawbacks. For more info see header comments for these
-functions in fs/namespace.c.
-
Resources
=========
diff --git a/Documentation/flexible-arrays.txt b/Documentation/flexible-arrays.txt
index df904aec9904..a0f2989dd804 100644
--- a/Documentation/flexible-arrays.txt
+++ b/Documentation/flexible-arrays.txt
@@ -1,6 +1,9 @@
+===================================
Using flexible arrays in the kernel
-Last updated for 2.6.32
-Jonathan Corbet <corbet@lwn.net>
+===================================
+
+:Updated: 2.6.32
+:Author: Jonathan Corbet <corbet@lwn.net>
Large contiguous memory allocations can be unreliable in the Linux kernel.
Kernel programmers will sometimes respond to this problem by allocating
@@ -26,7 +29,7 @@ operation. It's also worth noting that flexible arrays do no internal
locking at all; if concurrent access to an array is possible, then the
caller must arrange for appropriate mutual exclusion.
-The creation of a flexible array is done with:
+The creation of a flexible array is done with::
#include <linux/flex_array.h>
@@ -40,14 +43,14 @@ argument is passed directly to the internal memory allocation calls. With
the current code, using flags to ask for high memory is likely to lead to
notably unpleasant side effects.
-It is also possible to define flexible arrays at compile time with:
+It is also possible to define flexible arrays at compile time with::
DEFINE_FLEX_ARRAY(name, element_size, total);
This macro will result in a definition of an array with the given name; the
element size and total will be checked for validity at compile time.
-Storing data into a flexible array is accomplished with a call to:
+Storing data into a flexible array is accomplished with a call to::
int flex_array_put(struct flex_array *array, unsigned int element_nr,
void *src, gfp_t flags);
@@ -63,7 +66,7 @@ running in some sort of atomic context; in this situation, sleeping in the
memory allocator would be a bad thing. That can be avoided by using
GFP_ATOMIC for the flags value, but, often, there is a better way. The
trick is to ensure that any needed memory allocations are done before
-entering atomic context, using:
+entering atomic context, using::
int flex_array_prealloc(struct flex_array *array, unsigned int start,
unsigned int nr_elements, gfp_t flags);
@@ -73,7 +76,7 @@ defined by start and nr_elements has been allocated. Thereafter, a
flex_array_put() call on an element in that range is guaranteed not to
block.
-Getting data back out of the array is done with:
+Getting data back out of the array is done with::
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
@@ -89,7 +92,7 @@ involving that number probably result from use of unstored array entries.
Note that, if array elements are allocated with __GFP_ZERO, they will be
initialized to zero and this poisoning will not happen.
-Individual elements in the array can be cleared with:
+Individual elements in the array can be cleared with::
int flex_array_clear(struct flex_array *array, unsigned int element_nr);
@@ -97,7 +100,7 @@ This function will set the given element to FLEX_ARRAY_FREE and return
zero. If storage for the indicated element is not allocated for the array,
flex_array_clear() will return -EINVAL instead. Note that clearing an
element does not release the storage associated with it; to reduce the
-allocated size of an array, call:
+allocated size of an array, call::
int flex_array_shrink(struct flex_array *array);
@@ -106,12 +109,12 @@ This function works by scanning the array for pages containing nothing but
FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work
if the array's pages are allocated with __GFP_ZERO.
-It is possible to remove all elements of an array with a call to:
+It is possible to remove all elements of an array with a call to::
void flex_array_free_parts(struct flex_array *array);
This call frees all elements, but leaves the array itself in place.
-Freeing the entire array is done with:
+Freeing the entire array is done with::
void flex_array_free(struct flex_array *array);
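+
+Putting the pieces together, a typical sequence might look like this
+(a minimal sketch; the element type, sizes and indices are
+illustrative)::
+
+    #include <linux/flex_array.h>
+
+    struct foo {
+        int a;
+    };
+
+    static int flex_example(void)
+    {
+        struct flex_array *fa;
+        struct foo v = { .a = 42 }, *p;
+        int ret;
+
+        fa = flex_array_alloc(sizeof(struct foo), 128, GFP_KERNEL);
+        if (!fa)
+            return -ENOMEM;
+
+        /* allocate backing pages up front; later puts cannot block */
+        ret = flex_array_prealloc(fa, 0, 128, GFP_KERNEL);
+        if (ret)
+            goto out;
+
+        ret = flex_array_put(fa, 10, &v, GFP_KERNEL);
+        if (ret)
+            goto out;
+
+        p = flex_array_get(fa, 10);
+        if (!p)
+            ret = -ENOENT;
+    out:
+        flex_array_free(fa);
+        return ret;
+    }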
diff --git a/Documentation/futex-requeue-pi.txt b/Documentation/futex-requeue-pi.txt
index 77b36f59d16b..14ab5787b9a7 100644
--- a/Documentation/futex-requeue-pi.txt
+++ b/Documentation/futex-requeue-pi.txt
@@ -1,5 +1,6 @@
+================
Futex Requeue PI
-----------------
+================
Requeueing of tasks from a non-PI futex to a PI futex requires
special handling in order to ensure the underlying rt_mutex is never
@@ -20,28 +21,28 @@ implementation would wake the highest-priority waiter, and leave the
rest to the natural wakeup inherent in unlocking the mutex
associated with the condvar.
-Consider the simplified glibc calls:
-
-/* caller must lock mutex */
-pthread_cond_wait(cond, mutex)
-{
- lock(cond->__data.__lock);
- unlock(mutex);
- do {
- unlock(cond->__data.__lock);
- futex_wait(cond->__data.__futex);
- lock(cond->__data.__lock);
- } while(...)
- unlock(cond->__data.__lock);
- lock(mutex);
-}
-
-pthread_cond_broadcast(cond)
-{
- lock(cond->__data.__lock);
- unlock(cond->__data.__lock);
- futex_requeue(cond->data.__futex, cond->mutex);
-}
+Consider the simplified glibc calls::
+
+ /* caller must lock mutex */
+ pthread_cond_wait(cond, mutex)
+ {
+ lock(cond->__data.__lock);
+ unlock(mutex);
+ do {
+ unlock(cond->__data.__lock);
+ futex_wait(cond->__data.__futex);
+ lock(cond->__data.__lock);
+ } while(...)
+ unlock(cond->__data.__lock);
+ lock(mutex);
+ }
+
+ pthread_cond_broadcast(cond)
+ {
+ lock(cond->__data.__lock);
+ unlock(cond->__data.__lock);
+ futex_requeue(cond->__data.__futex, cond->mutex);
+ }
Once pthread_cond_broadcast() requeues the tasks, the cond->mutex
has waiters. Note that pthread_cond_wait() attempts to lock the
@@ -53,29 +54,29 @@ In order to support PI-aware pthread_condvar's, the kernel needs to
be able to requeue tasks to PI futexes. This support implies that
upon a successful futex_wait system call, the caller would return to
user space already holding the PI futex. The glibc implementation
-would be modified as follows:
-
-
-/* caller must lock mutex */
-pthread_cond_wait_pi(cond, mutex)
-{
- lock(cond->__data.__lock);
- unlock(mutex);
- do {
- unlock(cond->__data.__lock);
- futex_wait_requeue_pi(cond->__data.__futex);
- lock(cond->__data.__lock);
- } while(...)
- unlock(cond->__data.__lock);
- /* the kernel acquired the mutex for us */
-}
-
-pthread_cond_broadcast_pi(cond)
-{
- lock(cond->__data.__lock);
- unlock(cond->__data.__lock);
- futex_requeue_pi(cond->data.__futex, cond->mutex);
-}
+would be modified as follows::
+
+
+ /* caller must lock mutex */
+ pthread_cond_wait_pi(cond, mutex)
+ {
+ lock(cond->__data.__lock);
+ unlock(mutex);
+ do {
+ unlock(cond->__data.__lock);
+ futex_wait_requeue_pi(cond->__data.__futex);
+ lock(cond->__data.__lock);
+ } while(...)
+ unlock(cond->__data.__lock);
+ /* the kernel acquired the mutex for us */
+ }
+
+ pthread_cond_broadcast_pi(cond)
+ {
+ lock(cond->__data.__lock);
+ unlock(cond->__data.__lock);
+ futex_requeue_pi(cond->__data.__futex, cond->mutex);
+ }
The actual glibc implementation will likely test for PI and make the
necessary changes inside the existing calls rather than creating new
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
index 433eaefb4aa1..8502f24396fb 100644
--- a/Documentation/gcc-plugins.txt
+++ b/Documentation/gcc-plugins.txt
@@ -1,14 +1,15 @@
+=========================
GCC plugin infrastructure
=========================
-1. Introduction
-===============
+Introduction
+============
GCC plugins are loadable modules that provide extra features to the
-compiler [1]. They are useful for runtime instrumentation and static analysis.
+compiler [1]_. They are useful for runtime instrumentation and static analysis.
We can analyse, change and add further code during compilation via
-callbacks [2], GIMPLE [3], IPA [4] and RTL passes [5].
+callbacks [2]_, GIMPLE [3]_, IPA [4]_ and RTL passes [5]_.
The GCC plugin infrastructure of the kernel supports all gcc versions from
4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
@@ -21,56 +22,61 @@ and versions 4.8+ can only be compiled by a C++ compiler.
Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
powerpc architectures.
-This infrastructure was ported from grsecurity [6] and PaX [7].
+This infrastructure was ported from grsecurity [6]_ and PaX [7]_.
--
-[1] https://gcc.gnu.org/onlinedocs/gccint/Plugins.html
-[2] https://gcc.gnu.org/onlinedocs/gccint/Plugin-API.html#Plugin-API
-[3] https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html
-[4] https://gcc.gnu.org/onlinedocs/gccint/IPA.html
-[5] https://gcc.gnu.org/onlinedocs/gccint/RTL.html
-[6] https://grsecurity.net/
-[7] https://pax.grsecurity.net/
+.. [1] https://gcc.gnu.org/onlinedocs/gccint/Plugins.html
+.. [2] https://gcc.gnu.org/onlinedocs/gccint/Plugin-API.html#Plugin-API
+.. [3] https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html
+.. [4] https://gcc.gnu.org/onlinedocs/gccint/IPA.html
+.. [5] https://gcc.gnu.org/onlinedocs/gccint/RTL.html
+.. [6] https://grsecurity.net/
+.. [7] https://pax.grsecurity.net/
+
+
+Files
+=====
-2. Files
-========
+**$(src)/scripts/gcc-plugins**
-$(src)/scripts/gcc-plugins
This is the directory of the GCC plugins.
-$(src)/scripts/gcc-plugins/gcc-common.h
+**$(src)/scripts/gcc-plugins/gcc-common.h**
+
This is a compatibility header for GCC plugins.
It should be always included instead of individual gcc headers.
-$(src)/scripts/gcc-plugin.sh
+**$(src)/scripts/gcc-plugin.sh**
+
This script checks the availability of the included headers in
gcc-common.h and chooses the proper host compiler to build the plugins
(gcc-4.7 can be built by either gcc or g++).
-$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h
-$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h
-$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h
-$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h
+**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
+$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
+$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
+$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**
+
These headers automatically generate the registration structures for
GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
from 4.5 to 6.0.
They should be preferred to creating the structures by hand.
-3. Usage
-========
+Usage
+=====
You must install the gcc plugin headers for your gcc version,
-e.g., on Ubuntu for gcc-4.9:
+e.g., on Ubuntu for gcc-4.9::
apt-get install gcc-4.9-plugin-dev
-Enable a GCC plugin based feature in the kernel config:
+Enable a GCC plugin based feature in the kernel config::
CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
-To compile only the plugin(s):
+To compile only the plugin(s)::
make gcc-plugins
diff --git a/Documentation/highuid.txt b/Documentation/highuid.txt
index 6bad6f1d1cac..6ee70465c0ea 100644
--- a/Documentation/highuid.txt
+++ b/Documentation/highuid.txt
@@ -1,4 +1,9 @@
-Notes on the change from 16-bit UIDs to 32-bit UIDs:
+===================================================
+Notes on the change from 16-bit UIDs to 32-bit UIDs
+===================================================
+
+:Author: Chris Wing <wingc@umich.edu>
+:Last updated: January 11, 2000
- kernel code MUST take into account __kernel_uid_t and __kernel_uid32_t
when communicating between user and kernel space in an ioctl or data
@@ -28,30 +33,34 @@ What's left to be done for 32-bit UIDs on all Linux architectures:
uses the 32-bit UID system calls properly otherwise.
This affects at least:
- iBCS on Intel
- sparc32 emulation on sparc64
- (need to support whatever new 32-bit UID system calls are added to
- sparc32)
+ - iBCS on Intel
+
+ - sparc32 emulation on sparc64
+ (need to support whatever new 32-bit UID system calls are added to
+ sparc32)
- Validate that all filesystems behave properly.
At present, 32-bit UIDs _should_ work for:
- ext2
- ufs
- isofs
- nfs
- coda
- udf
+
+ - ext2
+ - ufs
+ - isofs
+ - nfs
+ - coda
+ - udf
Ioctl() fixups have been made for:
- ncpfs
- smbfs
+
+ - ncpfs
+ - smbfs
Filesystems with simple fixups to prevent 16-bit UID wraparound:
- minix
- sysv
- qnx4
+
+ - minix
+ - sysv
+ - qnx4
Other filesystems have not been checked yet.
@@ -69,9 +78,3 @@ What's left to be done for 32-bit UIDs on all Linux architectures:
- make sure that the UID mapping feature of AX25 networking works properly
(it should be safe because it has always used a 32-bit integer to
communicate between user and kernel)
-
-
-Chris Wing
-wingc@umich.edu
-
-last updated: January 11, 2000
diff --git a/Documentation/hw_random.txt b/Documentation/hw_random.txt
index fce1634907d0..121de96e395e 100644
--- a/Documentation/hw_random.txt
+++ b/Documentation/hw_random.txt
@@ -1,90 +1,105 @@
-Introduction:
-
- The hw_random framework is software that makes use of a
- special hardware feature on your CPU or motherboard,
- a Random Number Generator (RNG). The software has two parts:
- a core providing the /dev/hwrng character device and its
- sysfs support, plus a hardware-specific driver that plugs
- into that core.
-
- To make the most effective use of these mechanisms, you
- should download the support software as well. Download the
- latest version of the "rng-tools" package from the
- hw_random driver's official Web site:
-
- http://sourceforge.net/projects/gkernel/
-
- Those tools use /dev/hwrng to fill the kernel entropy pool,
- which is used internally and exported by the /dev/urandom and
- /dev/random special files.
-
-Theory of operation:
-
- CHARACTER DEVICE. Using the standard open()
- and read() system calls, you can read random data from
- the hardware RNG device. This data is NOT CHECKED by any
- fitness tests, and could potentially be bogus (if the
- hardware is faulty or has been tampered with). Data is only
- output if the hardware "has-data" flag is set, but nevertheless
- a security-conscious person would run fitness tests on the
- data before assuming it is truly random.
-
- The rng-tools package uses such tests in "rngd", and lets you
- run them by hand with a "rngtest" utility.
-
- /dev/hwrng is char device major 10, minor 183.
-
- CLASS DEVICE. There is a /sys/class/misc/hw_random node with
- two unique attributes, "rng_available" and "rng_current". The
- "rng_available" attribute lists the hardware-specific drivers
- available, while "rng_current" lists the one which is currently
- connected to /dev/hwrng. If your system has more than one
- RNG available, you may change the one used by writing a name from
- the list in "rng_available" into "rng_current".
+==========================================================
+Linux support for random number generator in i8xx chipsets
+==========================================================
+
+Introduction
+============
+
+The hw_random framework is software that makes use of a
+special hardware feature on your CPU or motherboard,
+a Random Number Generator (RNG). The software has two parts:
+a core providing the /dev/hwrng character device and its
+sysfs support, plus a hardware-specific driver that plugs
+into that core.
+
+To make the most effective use of these mechanisms, you
+should download the support software as well. Download the
+latest version of the "rng-tools" package from the
+hw_random driver's official Web site:
+
+ http://sourceforge.net/projects/gkernel/
+
+Those tools use /dev/hwrng to fill the kernel entropy pool,
+which is used internally and exported by the /dev/urandom and
+/dev/random special files.
+
+Theory of operation
+===================
+
+CHARACTER DEVICE. Using the standard open()
+and read() system calls, you can read random data from
+the hardware RNG device. This data is NOT CHECKED by any
+fitness tests, and could potentially be bogus (if the
+hardware is faulty or has been tampered with). Data is only
+output if the hardware "has-data" flag is set, but nevertheless
+a security-conscious person would run fitness tests on the
+data before assuming it is truly random.
+
+The rng-tools package uses such tests in "rngd", and lets you
+run them by hand with a "rngtest" utility.
+
+/dev/hwrng is a char device, major 10, minor 183.
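+
+For instance, a short userspace sketch that pulls a few bytes from the
+device (illustrative only; no fitness testing is done here)::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        unsigned char buf[16];
+        int i, fd = open("/dev/hwrng", O_RDONLY);
+
+        if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf))
+            return 1;
+        for (i = 0; i < (int)sizeof(buf); i++)
+            printf("%02x", buf[i]);
+        printf("\n");
+        return close(fd);
+    }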
+
+CLASS DEVICE. There is a /sys/class/misc/hw_random node with
+two unique attributes, "rng_available" and "rng_current". The
+"rng_available" attribute lists the hardware-specific drivers
+available, while "rng_current" lists the one which is currently
+connected to /dev/hwrng. If your system has more than one
+RNG available, you may change the one used by writing a name from
+the list in "rng_available" into "rng_current".
==========================================================================
- Hardware driver for Intel/AMD/VIA Random Number Generators (RNG)
- Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
- Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+Hardware driver for Intel/AMD/VIA Random Number Generators (RNG)
+ - Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ - Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
-About the Intel RNG hardware, from the firmware hub datasheet:
- The Firmware Hub integrates a Random Number Generator (RNG)
- using thermal noise generated from inherently random quantum
- mechanical properties of silicon. When not generating new random
- bits the RNG circuitry will enter a low power state. Intel will
- provide a binary software driver to give third party software
- access to our RNG for use as a security feature. At this time,
- the RNG is only to be used with a system in an OS-present state.
+About the Intel RNG hardware, from the firmware hub datasheet
+=============================================================
-Intel RNG Driver notes:
+The Firmware Hub integrates a Random Number Generator (RNG)
+using thermal noise generated from inherently random quantum
+mechanical properties of silicon. When not generating new random
+bits the RNG circuitry will enter a low power state. Intel will
+provide a binary software driver to give third party software
+access to our RNG for use as a security feature. At this time,
+the RNG is only to be used with a system in an OS-present state.
- * FIXME: support poll(2)
+Intel RNG Driver notes
+======================
- NOTE: request_mem_region was removed, for three reasons:
- 1) Only one RNG is supported by this driver, 2) The location
- used by the RNG is a fixed location in MMIO-addressable memory,
+FIXME: support poll(2)
+
+.. note::
+
+ request_mem_region was removed for three reasons:
+
+ 1) Only one RNG is supported by this driver;
+ 2) The location used by the RNG is a fixed location in
+ MMIO-addressable memory;
3) users with properly working BIOS e820 handling will always
- have the region in which the RNG is located reserved, so
- request_mem_region calls always fail for proper setups.
- However, for people who use mem=XX, BIOS e820 information is
- -not- in /proc/iomem, and request_mem_region(RNG_ADDR) can
- succeed.
+ have the region in which the RNG is located reserved, so
+ request_mem_region calls always fail for proper setups.
+ However, for people who use mem=XX, BIOS e820 information is
+ **not** in /proc/iomem, and request_mem_region(RNG_ADDR) can
+ succeed.
-Driver details:
+Driver details
+==============
- Based on:
+Based on:
Intel 82802AB/82802AC Firmware Hub (FWH) Datasheet
- May 1999 Order Number: 290658-002 R
+ May 1999 Order Number: 290658-002 R
- Intel 82802 Firmware Hub: Random Number Generator
+Intel 82802 Firmware Hub:
+ Random Number Generator
Programmer's Reference Manual
- December 1999 Order Number: 298029-001 R
+ December 1999 Order Number: 298029-001 R
- Intel 82802 Firmware HUB Random Number Generator Driver
+Intel 82802 Firmware HUB Random Number Generator Driver
Copyright (c) 2000 Matt Sottek <msottek@quiknet.com>
- Special thanks to Matt Sottek. I did the "guts", he
- did the "brains" and all the testing.
+Special thanks to Matt Sottek. I did the "guts", he
+did the "brains" and all the testing.
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt
index 61c1ee98e59f..ed640a278185 100644
--- a/Documentation/hwspinlock.txt
+++ b/Documentation/hwspinlock.txt
@@ -1,6 +1,9 @@
+===========================
Hardware Spinlock Framework
+===========================
-1. Introduction
+Introduction
+============
Hardware spinlock modules provide hardware assistance for synchronization
and mutual exclusion between heterogeneous processors and those not operating
@@ -32,286 +35,370 @@ structure).
A common hwspinlock interface makes it possible to have generic, platform-
independent drivers.
-2. User API
+User API
+========
+
+::
struct hwspinlock *hwspin_lock_request(void);
- - dynamically assign an hwspinlock and return its address, or NULL
- in case an unused hwspinlock isn't available. Users of this
- API will usually want to communicate the lock's id to the remote core
- before it can be used to achieve synchronization.
- Should be called from a process context (might sleep).
+
+Dynamically assign an hwspinlock and return its address, or NULL
+in case an unused hwspinlock isn't available. Users of this
+API will usually want to communicate the lock's id to the remote core
+before it can be used to achieve synchronization.
+
+Should be called from a process context (might sleep).
+
+::
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
- - assign a specific hwspinlock id and return its address, or NULL
- if that hwspinlock is already in use. Usually board code will
- be calling this function in order to reserve specific hwspinlock
- ids for predefined purposes.
- Should be called from a process context (might sleep).
+
+Assign a specific hwspinlock id and return its address, or NULL
+if that hwspinlock is already in use. Usually board code will
+be calling this function in order to reserve specific hwspinlock
+ids for predefined purposes.
+
+Should be called from a process context (might sleep).
+
+::
int of_hwspin_lock_get_id(struct device_node *np, int index);
- - retrieve the global lock id for an OF phandle-based specific lock.
- This function provides a means for DT users of a hwspinlock module
- to get the global lock id of a specific hwspinlock, so that it can
- be requested using the normal hwspin_lock_request_specific() API.
- The function returns a lock id number on success, -EPROBE_DEFER if
- the hwspinlock device is not yet registered with the core, or other
- error values.
- Should be called from a process context (might sleep).
+
+Retrieve the global lock id for an OF phandle-based specific lock.
+This function provides a means for DT users of a hwspinlock module
+to get the global lock id of a specific hwspinlock, so that it can
+be requested using the normal hwspin_lock_request_specific() API.
+
+The function returns a lock id number on success, -EPROBE_DEFER if
+the hwspinlock device is not yet registered with the core, or other
+error values.
+
+Should be called from a process context (might sleep).
+
+::
int hwspin_lock_free(struct hwspinlock *hwlock);
- - free a previously-assigned hwspinlock; returns 0 on success, or an
- appropriate error code on failure (e.g. -EINVAL if the hwspinlock
- is already free).
- Should be called from a process context (might sleep).
+
+Free a previously-assigned hwspinlock; returns 0 on success, or an
+appropriate error code on failure (e.g. -EINVAL if the hwspinlock
+is already free).
+
+Should be called from a process context (might sleep).
+
+::
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
- - lock a previously-assigned hwspinlock with a timeout limit (specified in
- msecs). If the hwspinlock is already taken, the function will busy loop
- waiting for it to be released, but give up when the timeout elapses.
- Upon a successful return from this function, preemption is disabled so
- the caller must not sleep, and is advised to release the hwspinlock as
- soon as possible, in order to minimize remote cores polling on the
- hardware interconnect.
- Returns 0 when successful and an appropriate error code otherwise (most
- notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
- The function will never sleep.
+
+Lock a previously-assigned hwspinlock with a timeout limit (specified in
+msecs). If the hwspinlock is already taken, the function will busy loop
+waiting for it to be released, but give up when the timeout elapses.
+Upon a successful return from this function, preemption is disabled so
+the caller must not sleep, and is advised to release the hwspinlock as
+soon as possible, in order to minimize remote cores polling on the
+hardware interconnect.
+
+Returns 0 when successful and an appropriate error code otherwise (most
+notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+The function will never sleep.
+
+::
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int timeout);
- - lock a previously-assigned hwspinlock with a timeout limit (specified in
- msecs). If the hwspinlock is already taken, the function will busy loop
- waiting for it to be released, but give up when the timeout elapses.
- Upon a successful return from this function, preemption and the local
- interrupts are disabled, so the caller must not sleep, and is advised to
- release the hwspinlock as soon as possible.
- Returns 0 when successful and an appropriate error code otherwise (most
- notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
- The function will never sleep.
+
+Lock a previously-assigned hwspinlock with a timeout limit (specified in
+msecs). If the hwspinlock is already taken, the function will busy loop
+waiting for it to be released, but give up when the timeout elapses.
+Upon a successful return from this function, preemption and the local
+interrupts are disabled, so the caller must not sleep, and is advised to
+release the hwspinlock as soon as possible.
+
+Returns 0 when successful and an appropriate error code otherwise (most
+notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+The function will never sleep.
+
+::
int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to,
- unsigned long *flags);
- - lock a previously-assigned hwspinlock with a timeout limit (specified in
- msecs). If the hwspinlock is already taken, the function will busy loop
- waiting for it to be released, but give up when the timeout elapses.
- Upon a successful return from this function, preemption is disabled,
- local interrupts are disabled and their previous state is saved at the
- given flags placeholder. The caller must not sleep, and is advised to
- release the hwspinlock as soon as possible.
- Returns 0 when successful and an appropriate error code otherwise (most
- notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
- The function will never sleep.
+ unsigned long *flags);
+
+Lock a previously-assigned hwspinlock with a timeout limit (specified in
+msecs). If the hwspinlock is already taken, the function will busy loop
+waiting for it to be released, but give up when the timeout elapses.
+Upon a successful return from this function, preemption is disabled,
+local interrupts are disabled and their previous state is saved at the
+given flags placeholder. The caller must not sleep, and is advised to
+release the hwspinlock as soon as possible.
+
+Returns 0 when successful and an appropriate error code otherwise (most
+notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+The function will never sleep.
+
+::
int hwspin_trylock(struct hwspinlock *hwlock);
- - attempt to lock a previously-assigned hwspinlock, but immediately fail if
- it is already taken.
- Upon a successful return from this function, preemption is disabled so
- caller must not sleep, and is advised to release the hwspinlock as soon as
- possible, in order to minimize remote cores polling on the hardware
- interconnect.
- Returns 0 on success and an appropriate error code otherwise (most
- notably -EBUSY if the hwspinlock was already taken).
- The function will never sleep.
+
+Attempt to lock a previously-assigned hwspinlock, but immediately fail if
+it is already taken.
+
+Upon a successful return from this function, preemption is disabled so
+caller must not sleep, and is advised to release the hwspinlock as soon as
+possible, in order to minimize remote cores polling on the hardware
+interconnect.
+
+Returns 0 on success and an appropriate error code otherwise (most
+notably -EBUSY if the hwspinlock was already taken).
+The function will never sleep.
+
+::
int hwspin_trylock_irq(struct hwspinlock *hwlock);
- - attempt to lock a previously-assigned hwspinlock, but immediately fail if
- it is already taken.
- Upon a successful return from this function, preemption and the local
- interrupts are disabled so caller must not sleep, and is advised to
- release the hwspinlock as soon as possible.
- Returns 0 on success and an appropriate error code otherwise (most
- notably -EBUSY if the hwspinlock was already taken).
- The function will never sleep.
+
+Attempt to lock a previously-assigned hwspinlock, but immediately fail if
+it is already taken.
+
+Upon a successful return from this function, preemption and the local
+interrupts are disabled so caller must not sleep, and is advised to
+release the hwspinlock as soon as possible.
+
+Returns 0 on success and an appropriate error code otherwise (most
+notably -EBUSY if the hwspinlock was already taken).
+The function will never sleep.
+
+::
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags);
- - attempt to lock a previously-assigned hwspinlock, but immediately fail if
- it is already taken.
- Upon a successful return from this function, preemption is disabled,
- the local interrupts are disabled and their previous state is saved
- at the given flags placeholder. The caller must not sleep, and is advised
- to release the hwspinlock as soon as possible.
- Returns 0 on success and an appropriate error code otherwise (most
- notably -EBUSY if the hwspinlock was already taken).
- The function will never sleep.
+
+Attempt to lock a previously-assigned hwspinlock, but immediately fail if
+it is already taken.
+
+Upon a successful return from this function, preemption is disabled,
+the local interrupts are disabled and their previous state is saved
+at the given flags placeholder. The caller must not sleep, and is advised
+to release the hwspinlock as soon as possible.
+
+Returns 0 on success and an appropriate error code otherwise (most
+notably -EBUSY if the hwspinlock was already taken).
+The function will never sleep.
+
+::
void hwspin_unlock(struct hwspinlock *hwlock);
- - unlock a previously-locked hwspinlock. Always succeed, and can be called
- from any context (the function never sleeps). Note: code should _never_
- unlock an hwspinlock which is already unlocked (there is no protection
- against this).
+
+Unlock a previously-locked hwspinlock. It always succeeds, and can be called
+from any context (the function never sleeps).
+
+.. note::
+
+ code should **never** unlock an hwspinlock which is already unlocked
+ (there is no protection against this).
+
+::
void hwspin_unlock_irq(struct hwspinlock *hwlock);
- - unlock a previously-locked hwspinlock and enable local interrupts.
- The caller should _never_ unlock an hwspinlock which is already unlocked.
- Doing so is considered a bug (there is no protection against this).
- Upon a successful return from this function, preemption and local
- interrupts are enabled. This function will never sleep.
+
+Unlock a previously-locked hwspinlock and enable local interrupts.
+
+The caller should **never** unlock an hwspinlock which is already unlocked.
+Doing so is considered a bug (there is no protection against this).
+
+Upon a successful return from this function, preemption and local
+interrupts are enabled. This function will never sleep.
+
+::
void
hwspin_unlock_irqrestore(struct hwspinlock *hwlock, unsigned long *flags);
- - unlock a previously-locked hwspinlock.
- The caller should _never_ unlock an hwspinlock which is already unlocked.
- Doing so is considered a bug (there is no protection against this).
- Upon a successful return from this function, preemption is reenabled,
- and the state of the local interrupts is restored to the state saved at
- the given flags. This function will never sleep.
+
+Unlock a previously-locked hwspinlock.
+
+The caller should **never** unlock an hwspinlock which is already unlocked.
+Doing so is considered a bug (there is no protection against this).
+
+Upon a successful return from this function, preemption is reenabled,
+and the state of the local interrupts is restored to the state saved at
+the given flags. This function will never sleep.
+
+::
int hwspin_lock_get_id(struct hwspinlock *hwlock);
- - retrieve id number of a given hwspinlock. This is needed when an
- hwspinlock is dynamically assigned: before it can be used to achieve
- mutual exclusion with a remote cpu, the id number should be communicated
- to the remote task with which we want to synchronize.
- Returns the hwspinlock id number, or -EINVAL if hwlock is null.
-
-3. Typical usage
-
-#include <linux/hwspinlock.h>
-#include <linux/err.h>
-
-int hwspinlock_example1(void)
-{
- struct hwspinlock *hwlock;
- int ret;
-
- /* dynamically assign a hwspinlock */
- hwlock = hwspin_lock_request();
- if (!hwlock)
- ...
-
- id = hwspin_lock_get_id(hwlock);
- /* probably need to communicate id to a remote processor now */
-
- /* take the lock, spin for 1 sec if it's already taken */
- ret = hwspin_lock_timeout(hwlock, 1000);
- if (ret)
- ...
-
- /*
- * we took the lock, do our thing now, but do NOT sleep
- */
-
- /* release the lock */
- hwspin_unlock(hwlock);
-
- /* free the lock */
- ret = hwspin_lock_free(hwlock);
- if (ret)
- ...
-
- return ret;
-}
-
-int hwspinlock_example2(void)
-{
- struct hwspinlock *hwlock;
- int ret;
-
- /*
- * assign a specific hwspinlock id - this should be called early
- * by board init code.
- */
- hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
- if (!hwlock)
- ...
-
- /* try to take it, but don't spin on it */
- ret = hwspin_trylock(hwlock);
- if (!ret) {
- pr_info("lock is already taken\n");
- return -EBUSY;
- }
- /*
- * we took the lock, do our thing now, but do NOT sleep
- */
+Retrieve the id number of a given hwspinlock. This is needed when an
+hwspinlock is dynamically assigned: before it can be used to achieve
+mutual exclusion with a remote cpu, the id number should be communicated
+to the remote task with which we want to synchronize.
+
+Returns the hwspinlock id number, or -EINVAL if hwlock is null.
+
+Typical usage
+=============
- /* release the lock */
- hwspin_unlock(hwlock);
+::
- /* free the lock */
- ret = hwspin_lock_free(hwlock);
- if (ret)
- ...
+ #include <linux/hwspinlock.h>
+ #include <linux/err.h>
- return ret;
-}
+ int hwspinlock_example1(void)
+ {
+ struct hwspinlock *hwlock;
+ int ret, id;
+ /* dynamically assign a hwspinlock */
+ hwlock = hwspin_lock_request();
+ if (!hwlock)
+ ...
-4. API for implementors
+ id = hwspin_lock_get_id(hwlock);
+ /* probably need to communicate id to a remote processor now */
+
+ /* take the lock, spin for 1 sec if it's already taken */
+ ret = hwspin_lock_timeout(hwlock, 1000);
+ if (ret)
+ ...
+
+ /*
+ * we took the lock, do our thing now, but do NOT sleep
+ */
+
+ /* release the lock */
+ hwspin_unlock(hwlock);
+
+ /* free the lock */
+ ret = hwspin_lock_free(hwlock);
+ if (ret)
+ ...
+
+ return ret;
+ }
+
+ int hwspinlock_example2(void)
+ {
+ struct hwspinlock *hwlock;
+ int ret;
+
+ /*
+ * assign a specific hwspinlock id - this should be called early
+ * by board init code.
+ */
+ hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
+ if (!hwlock)
+ ...
+
+ /* try to take it, but don't spin on it */
+ ret = hwspin_trylock(hwlock);
+ if (!ret) {
+ pr_info("lock is already taken\n");
+ return -EBUSY;
+ }
+
+ /*
+ * we took the lock, do our thing now, but do NOT sleep
+ */
+
+ /* release the lock */
+ hwspin_unlock(hwlock);
+
+ /* free the lock */
+ ret = hwspin_lock_free(hwlock);
+ if (ret)
+ ...
+
+ return ret;
+ }
+
+
+API for implementors
+====================
+
+::
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
- - to be called from the underlying platform-specific implementation, in
- order to register a new hwspinlock device (which is usually a bank of
- numerous locks). Should be called from a process context (this function
- might sleep).
- Returns 0 on success, or appropriate error code on failure.
+
+To be called from the underlying platform-specific implementation, in
+order to register a new hwspinlock device (which is usually a bank of
+numerous locks). Should be called from a process context (this function
+might sleep).
+
+Returns 0 on success, or appropriate error code on failure.
+
+::
int hwspin_lock_unregister(struct hwspinlock_device *bank);
- - to be called from the underlying vendor-specific implementation, in order
- to unregister an hwspinlock device (which is usually a bank of numerous
- locks).
- Should be called from a process context (this function might sleep).
- Returns the address of hwspinlock on success, or NULL on error (e.g.
- if the hwspinlock is still in use).
-5. Important structs
+To be called from the underlying vendor-specific implementation, in order
+to unregister an hwspinlock device (which is usually a bank of numerous
+locks).
+
+Should be called from a process context (this function might sleep).
+
+Returns 0 on success, or an appropriate error code on failure (e.g.
+-EBUSY if the hwspinlock is still in use).
+
+Important structs
+=================
struct hwspinlock_device is a device which usually contains a bank
of hardware locks. It is registered by the underlying hwspinlock
implementation using the hwspin_lock_register() API.
-/**
- * struct hwspinlock_device - a device which usually spans numerous hwspinlocks
- * @dev: underlying device, will be used to invoke runtime PM api
- * @ops: platform-specific hwspinlock handlers
- * @base_id: id index of the first lock in this device
- * @num_locks: number of locks in this device
- * @lock: dynamically allocated array of 'struct hwspinlock'
- */
-struct hwspinlock_device {
- struct device *dev;
- const struct hwspinlock_ops *ops;
- int base_id;
- int num_locks;
- struct hwspinlock lock[0];
-};
+::
+
+ /**
+ * struct hwspinlock_device - a device which usually spans numerous hwspinlocks
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @base_id: id index of the first lock in this device
+ * @num_locks: number of locks in this device
+ * @lock: dynamically allocated array of 'struct hwspinlock'
+ */
+ struct hwspinlock_device {
+ struct device *dev;
+ const struct hwspinlock_ops *ops;
+ int base_id;
+ int num_locks;
+ struct hwspinlock lock[0];
+ };
struct hwspinlock_device contains an array of hwspinlock structs, each
-of which represents a single hardware lock:
-
-/**
- * struct hwspinlock - this struct represents a single hwspinlock instance
- * @bank: the hwspinlock_device structure which owns this lock
- * @lock: initialized and used by hwspinlock core
- * @priv: private data, owned by the underlying platform-specific hwspinlock drv
- */
-struct hwspinlock {
- struct hwspinlock_device *bank;
- spinlock_t lock;
- void *priv;
-};
+of which represents a single hardware lock::
+
+ /**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ * @bank: the hwspinlock_device structure which owns this lock
+ * @lock: initialized and used by hwspinlock core
+ * @priv: private data, owned by the underlying platform-specific hwspinlock drv
+ */
+ struct hwspinlock {
+ struct hwspinlock_device *bank;
+ spinlock_t lock;
+ void *priv;
+ };
When registering a bank of locks, the hwspinlock driver only needs to
set the priv members of the locks. The rest of the members are set and
initialized by the hwspinlock core itself.
-6. Implementation callbacks
+Implementation callbacks
+========================
-There are three possible callbacks defined in 'struct hwspinlock_ops':
+There are three possible callbacks defined in 'struct hwspinlock_ops'::
-struct hwspinlock_ops {
- int (*trylock)(struct hwspinlock *lock);
- void (*unlock)(struct hwspinlock *lock);
- void (*relax)(struct hwspinlock *lock);
-};
+ struct hwspinlock_ops {
+ int (*trylock)(struct hwspinlock *lock);
+ void (*unlock)(struct hwspinlock *lock);
+ void (*relax)(struct hwspinlock *lock);
+ };
The first two callbacks are mandatory:
The ->trylock() callback should make a single attempt to take the lock, and
-return 0 on failure and 1 on success. This callback may _not_ sleep.
+return 0 on failure and 1 on success. This callback may **not** sleep.
The ->unlock() callback releases the lock. It always succeeds, and it, too,
-may _not_ sleep.
+may **not** sleep.
The ->relax() callback is optional. It is called by hwspinlock core while
spinning on a lock, and can be used by the underlying implementation to force
-a delay between two successive invocations of ->trylock(). It may _not_ sleep.
+a delay between two successive invocations of ->trylock(). It may **not** sleep.
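+
+As an illustration, consider hypothetical hardware that exposes one
+32-bit register per lock, where reading the register returns 0 if the
+lock was free (and atomically takes it), and writing 0 releases it.
+The callbacks for such a device could look like this (a sketch, not a
+real in-tree driver; the probe glue is elided)::
+
+    static int foo_hwspinlock_trylock(struct hwspinlock *lock)
+    {
+        void __iomem *lock_reg = lock->priv;
+
+        /* ->trylock() returns 1 on success, 0 on failure */
+        return readl(lock_reg) == 0;
+    }
+
+    static void foo_hwspinlock_unlock(struct hwspinlock *lock)
+    {
+        void __iomem *lock_reg = lock->priv;
+
+        writel(0, lock_reg);
+    }
+
+    static const struct hwspinlock_ops foo_hwspinlock_ops = {
+        .trylock = foo_hwspinlock_trylock,
+        .unlock  = foo_hwspinlock_unlock,
+    };
+
+The driver's probe routine would then set each lock's priv member to
+its register address and pass these ops to hwspin_lock_register().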
diff --git a/Documentation/input/index.rst b/Documentation/input/index.rst
index 7a3e71c2bd00..9888f5cbf6d5 100644
--- a/Documentation/input/index.rst
+++ b/Documentation/input/index.rst
@@ -6,7 +6,6 @@ Contents:
.. toctree::
:maxdepth: 2
- :numbered:
input_uapi
input_kapi
diff --git a/Documentation/intel_txt.txt b/Documentation/intel_txt.txt
index 91d89c540709..d83c1a2122c9 100644
--- a/Documentation/intel_txt.txt
+++ b/Documentation/intel_txt.txt
@@ -1,4 +1,5 @@
-Intel(R) TXT Overview:
+=====================
+Intel(R) TXT Overview
=====================
Intel's technology for safer computing, Intel(R) Trusted Execution
@@ -8,9 +9,10 @@ provide the building blocks for creating trusted platforms.
Intel TXT was formerly known by the code name LaGrande Technology (LT).
Intel TXT in Brief:
-o Provides dynamic root of trust for measurement (DRTM)
-o Data protection in case of improper shutdown
-o Measurement and verification of launched environment
+
+- Provides dynamic root of trust for measurement (DRTM)
+- Data protection in case of improper shutdown
+- Measurement and verification of launched environment
Intel TXT is part of the vPro(TM) brand and is also available on some
non-vPro systems. It is currently available on desktop systems
@@ -24,16 +26,21 @@ which has been updated for the new released platforms.
Intel TXT has been presented at various events over the past few
years, some of which are:
- LinuxTAG 2008:
+
+ - LinuxTAG 2008:
http://www.linuxtag.org/2008/en/conf/events/vp-donnerstag.html
- TRUST2008:
+
+ - TRUST2008:
http://www.trust-conference.eu/downloads/Keynote-Speakers/
3_David-Grawrock_The-Front-Door-of-Trusted-Computing.pdf
- IDF, Shanghai:
+
+ - IDF, Shanghai:
http://www.prcidf.com.cn/index_en.html
- IDFs 2006, 2007 (I'm not sure if/where they are online)
-Trusted Boot Project Overview:
+ - IDFs 2006, 2007
+ (I'm not sure if/where they are online)
+
+Trusted Boot Project Overview
=============================
Trusted Boot (tboot) is an open source, pre-kernel/VMM module that
@@ -87,11 +94,12 @@ Intel-provided firmware).
How Does it Work?
=================
-o Tboot is an executable that is launched by the bootloader as
+- Tboot is an executable that is launched by the bootloader as
the "kernel" (the binary the bootloader executes).
-o It performs all of the work necessary to determine if the
+- It performs all of the work necessary to determine if the
platform supports Intel TXT and, if so, executes the GETSEC[SENTER]
processor instruction that initiates the dynamic root of trust.
+
- If tboot determines that the system does not support Intel TXT
or is not configured correctly (e.g. the SINIT AC Module was
incorrect), it will directly launch the kernel with no changes
@@ -99,12 +107,14 @@ o It performs all of the work necessary to determine if the
- Tboot will output various information about its progress to the
terminal, serial port, and/or an in-memory log; the output
locations can be configured with a command line switch.
-o The GETSEC[SENTER] instruction will return control to tboot and
+
+- The GETSEC[SENTER] instruction will return control to tboot and
tboot then verifies certain aspects of the environment (e.g. TPM NV
lock, e820 table does not have invalid entries, etc.).
-o It will wake the APs from the special sleep state the GETSEC[SENTER]
+- It will wake the APs from the special sleep state the GETSEC[SENTER]
instruction had put them in and place them into a wait-for-SIPI
state.
+
- Because the processors will not respond to an INIT or SIPI when
in the TXT environment, it is necessary to create a small VT-x
guest for the APs. When they run in this guest, they will
@@ -112,8 +122,10 @@ o It will wake the APs from the special sleep state the GETSEC[SENTER]
VMEXITs, and then disable VT and jump to the SIPI vector. This
approach seemed like a better choice than having to insert
special code into the kernel's MP wakeup sequence.
-o Tboot then applies an (optional) user-defined launch policy to
+
+- Tboot then applies an (optional) user-defined launch policy to
verify the kernel and initrd.
+
- This policy is rooted in TPM NV and is described in the tboot
project. The tboot project also contains code for tools to
create and provision the policy.
@@ -121,30 +133,34 @@ o Tboot then applies an (optional) user-defined launch policy to
then any kernel will be launched.
- Policy action is flexible and can include halting on failures
or simply logging them and continuing.
-o Tboot adjusts the e820 table provided by the bootloader to reserve
+
+- Tboot adjusts the e820 table provided by the bootloader to reserve
its own location in memory as well as to reserve certain other
TXT-related regions.
-o As part of its launch, tboot DMA protects all of RAM (using the
+- As part of its launch, tboot DMA protects all of RAM (using the
VT-d PMRs). Thus, the kernel must be booted with 'intel_iommu=on'
in order to remove this blanket protection and use VT-d's
page-level protection.
-o Tboot will populate a shared page with some data about itself and
+- Tboot will populate a shared page with some data about itself and
pass this to the Linux kernel as it transfers control.
+
- The location of the shared page is passed via the boot_params
struct as a physical address.
-o The kernel will look for the tboot shared page address and, if it
+
+- The kernel will look for the tboot shared page address and, if it
exists, map it.
-o As one of the checks/protections provided by TXT, it makes a copy
+- As one of the checks/protections provided by TXT, it makes a copy
of the VT-d DMARs in a DMA-protected region of memory and verifies
them for correctness. The VT-d code will detect if the kernel was
launched with tboot and use this copy instead of the one in the
ACPI table.
-o At this point, tboot and TXT are out of the picture until a
+- At this point, tboot and TXT are out of the picture until a
shutdown (S<n>).
-o In order to put a system into any of the sleep states after a TXT
+- In order to put a system into any of the sleep states after a TXT
launch, TXT must first be exited. This is to prevent attacks that
attempt to crash the system to gain control on reboot and steal
data left in memory.
+
- The kernel will perform all of its sleep preparation and
populate the shared page with the ACPI data needed to put the
platform in the desired sleep state.
@@ -172,7 +188,7 @@ o In order to put a system into any of the sleep states after a TXT
That's pretty much it for TXT support.
-Configuring the System:
+Configuring the System
======================
This code works with 32bit, 32bit PAE, and 64bit (x86_64) kernels.
@@ -181,7 +197,8 @@ In BIOS, the user must enable: TPM, TXT, VT-x, VT-d. Not all BIOSes
allow these to be individually enabled/disabled and the screens in
which to find them are BIOS-specific.
-grub.conf needs to be modified as follows:
+grub.conf needs to be modified as follows::
+
title Linux 2.6.29-tip w/ tboot
root (hd0,0)
kernel /tboot.gz logging=serial,vga,memory
diff --git a/Documentation/io-mapping.txt b/Documentation/io-mapping.txt
index 5ca78426f54c..a966239f04e4 100644
--- a/Documentation/io-mapping.txt
+++ b/Documentation/io-mapping.txt
@@ -1,66 +1,81 @@
+========================
+The io_mapping functions
+========================
+
+API
+===
+
The io_mapping functions in linux/io-mapping.h provide an abstraction for
efficiently mapping small regions of an I/O device to the CPU. The initial
usage is to support the large graphics aperture on 32-bit processors where
ioremap_wc cannot be used to statically map the entire aperture to the CPU
as it would consume too much of the kernel address space.
-A mapping object is created during driver initialization using
+A mapping object is created during driver initialization using::
struct io_mapping *io_mapping_create_wc(unsigned long base,
unsigned long size)
- 'base' is the bus address of the region to be made
- mappable, while 'size' indicates how large a mapping region to
- enable. Both are in bytes.
+'base' is the bus address of the region to be made
+mappable, while 'size' indicates how large a mapping region to
+enable. Both are in bytes.
- This _wc variant provides a mapping which may only be used
- with the io_mapping_map_atomic_wc or io_mapping_map_wc.
+This _wc variant provides a mapping which may only be used
+with io_mapping_map_atomic_wc or io_mapping_map_wc.
With this mapping object, individual pages can be mapped either atomically
or not, depending on the necessary scheduling environment. Of course, atomic
-maps are more efficient:
+maps are more efficient::
void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
- 'offset' is the offset within the defined mapping region.
- Accessing addresses beyond the region specified in the
- creation function yields undefined results. Using an offset
- which is not page aligned yields an undefined result. The
- return value points to a single page in CPU address space.
+'offset' is the offset within the defined mapping region.
+Accessing addresses beyond the region specified in the
+creation function yields undefined results. Using an offset
+which is not page aligned yields an undefined result. The
+return value points to a single page in CPU address space.
+
+This _wc variant returns a write-combining map to the
+page and may only be used with mappings created by
+io_mapping_create_wc.
- This _wc variant returns a write-combining map to the
- page and may only be used with mappings created by
- io_mapping_create_wc
+Note that the task may not sleep while holding this page
+mapped.
- Note that the task may not sleep while holding this page
- mapped.
+::
void io_mapping_unmap_atomic(void *vaddr)
- 'vaddr' must be the value returned by the last
- io_mapping_map_atomic_wc call. This unmaps the specified
- page and allows the task to sleep once again.
+'vaddr' must be the value returned by the last
+io_mapping_map_atomic_wc call. This unmaps the specified
+page and allows the task to sleep once again.
If you need to sleep while holding a page mapped, you can use the non-atomic
variant, although it may be significantly slower.
+::
+
void *io_mapping_map_wc(struct io_mapping *mapping,
unsigned long offset)
- This works like io_mapping_map_atomic_wc except it allows
- the task to sleep while holding the page mapped.
+This works like io_mapping_map_atomic_wc except it allows
+the task to sleep while holding the page mapped.
+
+
+::
void io_mapping_unmap(void *vaddr)
- This works like io_mapping_unmap_atomic, except it is used
- for pages mapped with io_mapping_map_wc.
+This works like io_mapping_unmap_atomic, except it is used
+for pages mapped with io_mapping_map_wc.
-At driver close time, the io_mapping object must be freed:
+At driver close time, the io_mapping object must be freed::
void io_mapping_free(struct io_mapping *mapping)
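
As a quick orientation, here is a minimal usage sketch under assumed
values (the bus address, size, and offset below are purely illustrative,
and error handling is trimmed)::

    struct io_mapping *map;
    void *vaddr;

    /* driver init: make a 16 MB region at bus address 0xd0000000 mappable */
    map = io_mapping_create_wc(0xd0000000, 16 * 1024 * 1024);
    if (!map)
        return -ENOMEM;

    /* map one page atomically, write to it, unmap; no sleeping in between */
    vaddr = io_mapping_map_atomic_wc(map, 0);    /* page-aligned offset */
    writel(0, vaddr);
    io_mapping_unmap_atomic(vaddr);

    /* driver shutdown */
    io_mapping_free(map);
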
-Current Implementation:
+Current Implementation
+======================
The initial implementation of these functions uses existing mapping
mechanisms and so provides only an abstraction layer and no new
diff --git a/Documentation/io_ordering.txt b/Documentation/io_ordering.txt
index 9faae6f26d32..2ab303ce9a0d 100644
--- a/Documentation/io_ordering.txt
+++ b/Documentation/io_ordering.txt
@@ -1,3 +1,7 @@
+==============================================
+Ordering I/O writes to memory-mapped addresses
+==============================================
+
On some platforms, so-called memory-mapped I/O is weakly ordered. On such
platforms, driver writers are responsible for ensuring that I/O writes to
memory-mapped addresses on their device arrive in the order intended. This is
@@ -8,39 +12,39 @@ critical section of code protected by spinlocks. This would ensure that
subsequent writes to I/O space arrived only after all prior writes (much like a
memory barrier op, mb(), only with respect to I/O).
-A more concrete example from a hypothetical device driver:
+A more concrete example from a hypothetical device driver::
- ...
-CPU A: spin_lock_irqsave(&dev_lock, flags)
-CPU A: val = readl(my_status);
-CPU A: ...
-CPU A: writel(newval, ring_ptr);
-CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
-CPU B: spin_lock_irqsave(&dev_lock, flags)
-CPU B: val = readl(my_status);
-CPU B: ...
-CPU B: writel(newval2, ring_ptr);
-CPU B: spin_unlock_irqrestore(&dev_lock, flags)
- ...
+ ...
+ CPU A: spin_lock_irqsave(&dev_lock, flags)
+ CPU A: val = readl(my_status);
+ CPU A: ...
+ CPU A: writel(newval, ring_ptr);
+ CPU A: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
+ CPU B: spin_lock_irqsave(&dev_lock, flags)
+ CPU B: val = readl(my_status);
+ CPU B: ...
+ CPU B: writel(newval2, ring_ptr);
+ CPU B: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
In the case above, the device may receive newval2 before it receives newval,
-which could cause problems. Fixing it is easy enough though:
+which could cause problems. Fixing it is easy enough though::
- ...
-CPU A: spin_lock_irqsave(&dev_lock, flags)
-CPU A: val = readl(my_status);
-CPU A: ...
-CPU A: writel(newval, ring_ptr);
-CPU A: (void)readl(safe_register); /* maybe a config register? */
-CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
-CPU B: spin_lock_irqsave(&dev_lock, flags)
-CPU B: val = readl(my_status);
-CPU B: ...
-CPU B: writel(newval2, ring_ptr);
-CPU B: (void)readl(safe_register); /* maybe a config register? */
-CPU B: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
+ CPU A: spin_lock_irqsave(&dev_lock, flags)
+ CPU A: val = readl(my_status);
+ CPU A: ...
+ CPU A: writel(newval, ring_ptr);
+ CPU A: (void)readl(safe_register); /* maybe a config register? */
+ CPU A: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
+ CPU B: spin_lock_irqsave(&dev_lock, flags)
+ CPU B: val = readl(my_status);
+ CPU B: ...
+ CPU B: writel(newval2, ring_ptr);
+ CPU B: (void)readl(safe_register); /* maybe a config register? */
+ CPU B: spin_unlock_irqrestore(&dev_lock, flags)
Here, the reads from safe_register will cause the I/O chipset to flush any
pending writes before actually posting the read to the chipset, preventing
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 65f694f2d1c9..04d394a2e06c 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -1,49 +1,50 @@
+=====================
I/O statistics fields
----------------
+=====================
Since 2.4.20 (and some versions before, with patches), and 2.5.45,
more extensive disk statistics have been introduced to help measure disk
-activity. Tools such as sar and iostat typically interpret these and do
+activity. Tools such as ``sar`` and ``iostat`` typically interpret these and do
the work for you, but in case you are interested in creating your own
tools, the fields are explained here.
In 2.4 now, the information is found as additional fields in
-/proc/partitions. In 2.6, the same information is found in two
-places: one is in the file /proc/diskstats, and the other is within
+``/proc/partitions``. In 2.6 and later, the same information is found in two
+places: one is in the file ``/proc/diskstats``, and the other is within
the sysfs file system, which must be mounted in order to obtain
the information. Throughout this document we'll assume that sysfs
-is mounted on /sys, although of course it may be mounted anywhere.
-Both /proc/diskstats and sysfs use the same source for the information
+is mounted on ``/sys``, although of course it may be mounted anywhere.
+Both ``/proc/diskstats`` and sysfs use the same source for the information
and so should not differ.
-Here are examples of these different formats:
+Here are examples of these different formats::
-2.4:
- 3 0 39082680 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 3 1 9221278 hda1 35486 0 35496 38030 0 0 0 0 0 38030 38030
+ 2.4:
+ 3 0 39082680 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
+ 3 1 9221278 hda1 35486 0 35496 38030 0 0 0 0 0 38030 38030
+ 2.6+ sysfs:
+ 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
+ 35486 38030 38030 38030
-2.6 sysfs:
- 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 35486 38030 38030 38030
+ 2.6+ diskstats:
+ 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
+ 3 1 hda1 35486 38030 38030 38030
-2.6 diskstats:
- 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 3 1 hda1 35486 38030 38030 38030
+On 2.4 you might execute ``grep 'hda ' /proc/partitions``. On 2.6+, you have
+a choice of ``cat /sys/block/hda/stat`` or ``grep 'hda ' /proc/diskstats``.
-On 2.4 you might execute "grep 'hda ' /proc/partitions". On 2.6, you have
-a choice of "cat /sys/block/hda/stat" or "grep 'hda ' /proc/diskstats".
The advantage of one over the other is that the sysfs choice works well
-if you are watching a known, small set of disks. /proc/diskstats may
+if you are watching a known, small set of disks. ``/proc/diskstats`` may
be a better choice if you are watching a large number of disks because
you'll avoid the overhead of 50, 100, or 500 or more opens/closes with
each snapshot of your disk statistics.
In 2.4, the statistics fields are those after the device name. In
the above example, the first field of statistics would be 446216.
-By contrast, in 2.6 if you look at /sys/block/hda/stat, you'll
+By contrast, in 2.6+ if you look at ``/sys/block/hda/stat``, you'll
find just the eleven fields, beginning with 446216. If you look at
-/proc/diskstats, the eleven fields will be preceded by the major and
+``/proc/diskstats``, the eleven fields will be preceded by the major and
minor device numbers, and device name. Each of these formats provides
eleven fields of statistics, each meaning exactly the same things.
All fields except field 9 are cumulative since boot. Field 9 should
@@ -59,30 +60,40 @@ system-wide stats you'll have to find all the devices and sum them all up.
Field 1 -- # of reads completed
This is the total number of reads completed successfully.
+
Field 2 -- # of reads merged, field 6 -- # of writes merged
Reads and writes which are adjacent to each other may be merged for
efficiency. Thus two 4K reads may become one 8K read before it is
ultimately handed to the disk, and so it will be counted (and queued)
as only one I/O. This field lets you know how often this was done.
+
Field 3 -- # of sectors read
This is the total number of sectors read successfully.
+
Field 4 -- # of milliseconds spent reading
This is the total number of milliseconds spent by all reads (as
measured from __make_request() to end_that_request_last()).
+
Field 5 -- # of writes completed
This is the total number of writes completed successfully.
+
Field 6 -- # of writes merged
See the description of field 2.
+
Field 7 -- # of sectors written
This is the total number of sectors written successfully.
+
Field 8 -- # of milliseconds spent writing
This is the total number of milliseconds spent by all writes (as
measured from __make_request() to end_that_request_last()).
+
Field 9 -- # of I/Os currently in progress
The only field that should go to zero. Incremented as requests are
given to appropriate struct request_queue and decremented as they finish.
+
Field 10 -- # of milliseconds spent doing I/Os
This field increases so long as field 9 is nonzero.
+
Field 11 -- weighted # of milliseconds spent doing I/Os
This field is incremented at each I/O start, I/O completion, I/O
merge, or read of these stats by the number of I/Os in progress
@@ -97,7 +108,7 @@ introduced when changes collide, so (for instance) adding up all the
read I/Os issued per partition should equal those made to the disks ...
but due to the lack of locking it may only be very close.
-In 2.6, there are counters for each CPU, which make the lack of locking
+In 2.6+, there are counters for each CPU, which make the lack of locking
almost a non-issue. When the statistics are read, the per-CPU counters
are summed (possibly overflowing the unsigned long variable they are
summed to) and the result given to the user. There is no convenient
@@ -106,22 +117,25 @@ user interface for accessing the per-CPU counters themselves.
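
If you want to consume these fields programmatically on 2.6+, a sketch
along these lines is enough (``hda`` is an assumed device name; only
minimal error handling is shown)::

    #include <stdio.h>

    int main(void)
    {
        unsigned long long f[11];
        FILE *fp = fopen("/sys/block/hda/stat", "r");
        int i;

        if (!fp)
            return 1;
        for (i = 0; i < 11; i++)
            if (fscanf(fp, "%llu", &f[i]) != 1)
                return 1;
        fclose(fp);

        /* field 1 = reads completed, field 4 = ms spent reading */
        printf("reads: %llu, avg read wait: %.2f ms\n",
               f[0], f[0] ? (double)f[3] / f[0] : 0.0);
        return 0;
    }
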
Disks vs Partitions
-------------------
-There were significant changes between 2.4 and 2.6 in the I/O subsystem.
+There were significant changes between 2.4 and 2.6+ in the I/O subsystem.
As a result, some statistic information disappeared. The translation from
a disk address relative to a partition to the disk address relative to
the host disk happens much earlier. All merges and timings now happen
at the disk level rather than at both the disk and partition level as
-in 2.4. Consequently, you'll see a different statistics output on 2.6 for
+in 2.4. Consequently, you'll see a different statistics output on 2.6+ for
partitions from that for disks. There are only *four* fields available
-for partitions on 2.6 machines. This is reflected in the examples above.
+for partitions on 2.6+ machines. This is reflected in the examples above.
Field 1 -- # of reads issued
This is the total number of reads issued to this partition.
+
Field 2 -- # of sectors read
This is the total number of sectors requested to be read from this
partition.
+
Field 3 -- # of writes issued
This is the total number of writes issued to this partition.
+
Field 4 -- # of sectors written
This is the total number of sectors requested to be written to
this partition.
@@ -149,16 +163,16 @@ to some (probably insignificant) inaccuracy.
Additional notes
----------------
-In 2.6, sysfs is not mounted by default. If your distribution of
+In 2.6+, sysfs is not mounted by default. If your distribution of
Linux hasn't added it already, here's the line you'll want to add to
-your /etc/fstab:
+your ``/etc/fstab``::
-none /sys sysfs defaults 0 0
+ none /sys sysfs defaults 0 0
-In 2.6, all disk statistics were removed from /proc/stat. In 2.4, they
-appear in both /proc/partitions and /proc/stat, although the ones in
-/proc/stat take a very different format from those in /proc/partitions
+In 2.6+, all disk statistics were removed from ``/proc/stat``. In 2.4, they
+appear in both ``/proc/partitions`` and ``/proc/stat``, although the ones in
+``/proc/stat`` take a very different format from those in ``/proc/partitions``
(see proc(5), if your system has it.)
-- ricklind@us.ibm.com
diff --git a/Documentation/irqflags-tracing.txt b/Documentation/irqflags-tracing.txt
index f6da05670e16..bdd208259fb3 100644
--- a/Documentation/irqflags-tracing.txt
+++ b/Documentation/irqflags-tracing.txt
@@ -1,8 +1,10 @@
+=======================
IRQ-flags state tracing
+=======================
-started by Ingo Molnar <mingo@redhat.com>
+:Author: started by Ingo Molnar <mingo@redhat.com>
-the "irq-flags tracing" feature "traces" hardirq and softirq state, in
+The "irq-flags tracing" feature "traces" hardirq and softirq state, in
that it gives interested subsystems an opportunity to be notified of
every hardirqs-off/hardirqs-on, softirqs-off/softirqs-on event that
happens in the kernel.
@@ -14,7 +16,7 @@ CONFIG_PROVE_RWSEM_LOCKING will be offered on an architecture - these
are locking APIs that are not used in IRQ context. (the one exception
for rwsems is worked around)
-architecture support for this is certainly not in the "trivial"
+Architecture support for this is certainly not in the "trivial"
category, because a lot of low-level assembly code deals with irq-flags
state changes. But an architecture can be irq-flags-tracing enabled in a
rather straightforward and risk-free manner.
@@ -41,7 +43,7 @@ irq-flags-tracing support:
excluded from the irq-tracing [and lock validation] mechanism via
lockdep_off()/lockdep_on().
-in general there is no risk from having an incomplete irq-flags-tracing
+In general there is no risk from having an incomplete irq-flags-tracing
implementation in an architecture: lockdep will detect that and will
turn itself off. I.e. the lock validator will still be reliable. There
should be no crashes due to irq-tracing bugs. (except if the assembly
diff --git a/Documentation/isa.txt b/Documentation/isa.txt
index f232c26a40be..def4a7b690b5 100644
--- a/Documentation/isa.txt
+++ b/Documentation/isa.txt
@@ -1,5 +1,6 @@
+===========
ISA Drivers
------------
+===========
The following text is adapted from the commit message of the initial
commit of the ISA bus driver authored by Rene Herman.
@@ -23,17 +24,17 @@ that all device creation has been made internal as well.
The usage model this provides is nice, and has been acked from the ALSA
side by Takashi Iwai and Jaroslav Kysela. The ALSA driver module_init's
-now (for oldisa-only drivers) become:
+now (for oldisa-only drivers) become::
-static int __init alsa_card_foo_init(void)
-{
- return isa_register_driver(&snd_foo_isa_driver, SNDRV_CARDS);
-}
+ static int __init alsa_card_foo_init(void)
+ {
+ return isa_register_driver(&snd_foo_isa_driver, SNDRV_CARDS);
+ }
-static void __exit alsa_card_foo_exit(void)
-{
- isa_unregister_driver(&snd_foo_isa_driver);
-}
+ static void __exit alsa_card_foo_exit(void)
+ {
+ isa_unregister_driver(&snd_foo_isa_driver);
+ }
Quite like the other bus models therefore. This removes a lot of
duplicated init code from the ALSA ISA drivers.
@@ -47,11 +48,11 @@ parameter, indicating how many devices to create and call our methods
with.
The platform_driver callbacks are called with a platform_device param;
-the isa_driver callbacks are being called with a "struct device *dev,
-unsigned int id" pair directly -- with the device creation completely
+the isa_driver callbacks are being called with a ``struct device *dev,
+unsigned int id`` pair directly -- with the device creation completely
internal to the bus it's much cleaner to not leak isa_dev's by passing
them in at all. The id is the only thing we ever want other than the
-struct device * anyways, and it makes for nicer code in the callbacks as
+struct device anyway, and it makes for nicer code in the callbacks as
well.
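
To make the callback convention concrete, a hypothetical driver skeleton
might look as follows (the names and the presence-detection helper are
illustrative, not taken from a real driver)::

    static int foo_match(struct device *dev, unsigned int id)
    {
        /* nonzero means "device 'id' is present, go on to probe it" */
        return foo_card_present(id);    /* hypothetical detection helper */
    }

    static int foo_probe(struct device *dev, unsigned int id)
    {
        /* set up card 'id'; called only when foo_match() succeeded */
        return 0;
    }

    static struct isa_driver foo_isa_driver = {
        .match  = foo_match,
        .probe  = foo_probe,
        .driver = {
            .name = "foo",
        },
    };

    /* in module init, for up to FOO_MAX_CARDS devices: */
    error = isa_register_driver(&foo_isa_driver, FOO_MAX_CARDS);
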
With this additional .match() callback ISA drivers have all options. If
@@ -75,20 +76,20 @@ This exports only two functions; isa_{,un}register_driver().
isa_register_driver() registers the struct device_driver, and then
loops over the passed in ndev creating devices and registering them.
-This causes the bus match method to be called for them, which is:
+This causes the bus match method to be called for them, which is::
-int isa_bus_match(struct device *dev, struct device_driver *driver)
-{
- struct isa_driver *isa_driver = to_isa_driver(driver);
+ int isa_bus_match(struct device *dev, struct device_driver *driver)
+ {
+ struct isa_driver *isa_driver = to_isa_driver(driver);
- if (dev->platform_data == isa_driver) {
- if (!isa_driver->match ||
- isa_driver->match(dev, to_isa_dev(dev)->id))
- return 1;
- dev->platform_data = NULL;
- }
- return 0;
-}
+ if (dev->platform_data == isa_driver) {
+ if (!isa_driver->match ||
+ isa_driver->match(dev, to_isa_dev(dev)->id))
+ return 1;
+ dev->platform_data = NULL;
+ }
+ return 0;
+ }
The first thing this does is check if this device is in fact one of this
driver's devices by seeing if the device's platform_data pointer is set
@@ -102,7 +103,7 @@ well.
Then, if the driver did not provide a .match, it matches. If it did,
the driver match() method is called to determine a match.
-If it did _not_ match, dev->platform_data is reset to indicate this to
+If it did **not** match, dev->platform_data is reset to indicate this to
isa_register_driver which can then unregister the device again.
If during all this, there's any error, or no devices matched at all
diff --git a/Documentation/isapnp.txt b/Documentation/isapnp.txt
index 400d1b5b523d..8d0840ac847b 100644
--- a/Documentation/isapnp.txt
+++ b/Documentation/isapnp.txt
@@ -1,3 +1,4 @@
+==========================================================
ISA Plug & Play support by Jaroslav Kysela <perex@suse.cz>
==========================================================
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 615434d81108..51814450a7f8 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -112,8 +112,8 @@ There are two possible methods of using Kdump.
2) Or use the system kernel binary itself as dump-capture kernel and there is
no need to build a separate dump-capture kernel. This is possible
only with the architectures which support a relocatable kernel. As
- of today, i386, x86_64, ppc64, ia64 and arm architectures support relocatable
- kernel.
+ of today, i386, x86_64, ppc64, ia64, arm and arm64 architectures support
+ a relocatable kernel.
Building a relocatable kernel is advantageous from the point of view that
one does not have to build a second kernel for capturing the dump. But
@@ -339,7 +339,7 @@ For arm:
For arm64:
- Use vmlinux or Image
-If you are using a uncompressed vmlinux image then use following command
+If you are using an uncompressed vmlinux image then use the following command
to load the dump-capture kernel.
kexec -p <dump-capture-kernel-vmlinux-image> \
@@ -361,6 +361,12 @@ to load dump-capture kernel.
--dtb=<dtb-for-dump-capture-kernel> \
--append="root=<root-dev> <arch-specific-options>"
+If you are using an uncompressed Image, then use the following command
+to load the dump-capture kernel.
+
+ kexec -p <dump-capture-kernel-Image> \
+ --initrd=<initrd-for-dump-capture-kernel> \
+ --append="root=<root-dev> <arch-specific-options>"
Please note that --args-linux does not need to be specified for ia64.
It is planned to make this a no-op on that architecture, but for now
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index 2cb7dc5c0e0d..0f00f9c164ac 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -1,27 +1,29 @@
-REDUCING OS JITTER DUE TO PER-CPU KTHREADS
+==========================================
+Reducing OS jitter due to per-CPU kthreads
+==========================================
This document lists per-CPU kthreads in the Linux kernel and presents
options to control their OS jitter. Note that non-per-CPU kthreads are
not listed here. To reduce OS jitter from non-per-CPU kthreads, bind
them to a "housekeeping" CPU dedicated to such work.
+References
+==========
-REFERENCES
+- Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs.
-o Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs.
+- Documentation/cgroup-v1: Using cgroups to bind tasks to sets of CPUs.
-o Documentation/cgroup-v1: Using cgroups to bind tasks to sets of CPUs.
-
-o man taskset: Using the taskset command to bind tasks to sets
+- man taskset: Using the taskset command to bind tasks to sets
of CPUs.
-o man sched_setaffinity: Using the sched_setaffinity() system
+- man sched_setaffinity: Using the sched_setaffinity() system
call to bind tasks to sets of CPUs.
-o /sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state,
+- /sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state,
writing "0" to offline and "1" to online.
-o In order to locate kernel-generated OS jitter on CPU N:
+- In order to locate kernel-generated OS jitter on CPU N:
cd /sys/kernel/debug/tracing
echo 1 > max_graph_depth # Increase the "1" for more detail
@@ -29,12 +31,17 @@ o In order to locate kernel-generated OS jitter on CPU N:
# run workload
cat per_cpu/cpuN/trace
+kthreads
+========
+
+Name:
+ ehca_comp/%u
-KTHREADS
+Purpose:
+ Periodically process Infiniband-related work.
-Name: ehca_comp/%u
-Purpose: Periodically process Infiniband-related work.
To reduce its OS jitter, do any of the following:
+
1. Don't use eHCA Infiniband hardware, instead choosing hardware
that does not require per-CPU kthreads. This will prevent these
kthreads from being created in the first place. (This will
@@ -46,26 +53,45 @@ To reduce its OS jitter, do any of the following:
provisioned only on selected CPUs.
-Name: irq/%d-%s
-Purpose: Handle threaded interrupts.
+Name:
+ irq/%d-%s
+
+Purpose:
+ Handle threaded interrupts.
+
To reduce its OS jitter, do the following:
+
1. Use irq affinity to force the irq threads to execute on
some other CPU.
-Name: kcmtpd_ctr_%d
-Purpose: Handle Bluetooth work.
+Name:
+ kcmtpd_ctr_%d
+
+Purpose:
+ Handle Bluetooth work.
+
To reduce its OS jitter, do one of the following:
+
1. Don't use Bluetooth, in which case these kthreads won't be
created in the first place.
2. Use irq affinity to force Bluetooth-related interrupts to
occur on some other CPU and furthermore initiate all
Bluetooth activity on some other CPU.
-Name: ksoftirqd/%u
-Purpose: Execute softirq handlers when threaded or when under heavy load.
+Name:
+ ksoftirqd/%u
+
+Purpose:
+ Execute softirq handlers when threaded or when under heavy load.
+
To reduce its OS jitter, each softirq vector must be handled
separately as follows:
-TIMER_SOFTIRQ: Do all of the following:
+
+TIMER_SOFTIRQ
+-------------
+
+Do all of the following:
+
1. To the extent possible, keep the CPU out of the kernel when it
is non-idle, for example, by avoiding system calls and by forcing
both kernel threads and interrupts to execute elsewhere.
@@ -76,34 +102,59 @@ TIMER_SOFTIRQ: Do all of the following:
first one back online. Once you have onlined the CPUs in question,
do not offline any other CPUs, because doing so could force the
timer back onto one of the CPUs in question.
-NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following:
+
+NET_TX_SOFTIRQ and NET_RX_SOFTIRQ
+---------------------------------
+
+Do all of the following:
+
1. Force networking interrupts onto other CPUs.
2. Initiate any network I/O on other CPUs.
3. Once your application has started, prevent CPU-hotplug operations
from being initiated from tasks that might run on the CPU to
be de-jittered. (It is OK to force this CPU offline and then
bring it back online before you start your application.)
-BLOCK_SOFTIRQ: Do all of the following:
+
+BLOCK_SOFTIRQ
+-------------
+
+Do all of the following:
+
1. Force block-device interrupts onto some other CPU.
2. Initiate any block I/O on other CPUs.
3. Once your application has started, prevent CPU-hotplug operations
from being initiated from tasks that might run on the CPU to
be de-jittered. (It is OK to force this CPU offline and then
bring it back online before you start your application.)
-IRQ_POLL_SOFTIRQ: Do all of the following:
+
+IRQ_POLL_SOFTIRQ
+----------------
+
+Do all of the following:
+
1. Force block-device interrupts onto some other CPU.
2. Initiate any block I/O and block-I/O polling on other CPUs.
3. Once your application has started, prevent CPU-hotplug operations
from being initiated from tasks that might run on the CPU to
be de-jittered. (It is OK to force this CPU offline and then
bring it back online before you start your application.)
-TASKLET_SOFTIRQ: Do one or more of the following:
+
+TASKLET_SOFTIRQ
+---------------
+
+Do one or more of the following:
+
1. Avoid use of drivers that use tasklets. (Such drivers will contain
calls to things like tasklet_schedule().)
2. Convert all drivers that you must use from tasklets to workqueues.
3. Force interrupts for drivers using tasklets onto other CPUs,
and also do I/O involving these drivers on other CPUs.
-SCHED_SOFTIRQ: Do all of the following:
+
+SCHED_SOFTIRQ
+-------------
+
+Do all of the following:
+
1. Avoid sending scheduler IPIs to the CPU to be de-jittered,
for example, ensure that at most one runnable kthread is present
on that CPU. If a thread that expects to run on the de-jittered
@@ -120,7 +171,12 @@ SCHED_SOFTIRQ: Do all of the following:
forcing both kernel threads and interrupts to execute elsewhere.
This further reduces the number of scheduler-clock interrupts
received by the de-jittered CPU.
-HRTIMER_SOFTIRQ: Do all of the following:
+
+HRTIMER_SOFTIRQ
+---------------
+
+Do all of the following:
+
1. To the extent possible, keep the CPU out of the kernel when it
is non-idle. For example, avoid system calls and force both
kernel threads and interrupts to execute elsewhere.
@@ -131,9 +187,15 @@ HRTIMER_SOFTIRQ: Do all of the following:
back online. Once you have onlined the CPUs in question, do not
offline any other CPUs, because doing so could force the timer
back onto one of the CPUs in question.
-RCU_SOFTIRQ: Do at least one of the following:
+
+RCU_SOFTIRQ
+-----------
+
+Do at least one of the following:
+
1. Offload callbacks and keep the CPU in either dyntick-idle or
adaptive-ticks state by doing all of the following:
+
a. CONFIG_NO_HZ_FULL=y and ensure that the CPU to be
de-jittered is marked as an adaptive-ticks CPU using the
"nohz_full=" boot parameter. Bind the rcuo kthreads to
@@ -142,8 +204,10 @@ RCU_SOFTIRQ: Do at least one of the following:
when it is non-idle, for example, by avoiding system
calls and by forcing both kernel threads and interrupts
to execute elsewhere.
+
2. Enable RCU to do its processing remotely via dyntick-idle by
doing all of the following:
+
a. Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
b. Ensure that the CPU goes idle frequently, allowing other
CPUs to detect that it has passed through an RCU quiescent
@@ -155,15 +219,20 @@ RCU_SOFTIRQ: Do at least one of the following:
calls and by forcing both kernel threads and interrupts
to execute elsewhere.
-Name: kworker/%u:%d%s (cpu, id, priority)
-Purpose: Execute workqueue requests
+Name:
+ kworker/%u:%d%s (cpu, id, priority)
+
+Purpose:
+ Execute workqueue requests
+
To reduce its OS jitter, do any of the following:
+
1. Run your workload at a real-time priority, which will allow
preempting the kworker daemons.
2. A given workqueue can be made visible in the sysfs filesystem
   by passing the WQ_SYSFS flag to that workqueue's alloc_workqueue()
   (see the sketch after this list).
Such a workqueue can be confined to a given subset of the
- CPUs using the /sys/devices/virtual/workqueue/*/cpumask sysfs
+ CPUs using the ``/sys/devices/virtual/workqueue/*/cpumask`` sysfs
files. The set of WQ_SYSFS workqueues can be displayed using
"ls sys/devices/virtual/workqueue". That said, the workqueues
maintainer would like to caution people against indiscriminately
@@ -173,6 +242,7 @@ To reduce its OS jitter, do any of the following:
to remove it, even if its addition was a mistake.
3. Do any of the following needed to avoid jitter that your
application cannot tolerate:
+
a. Build your kernel with CONFIG_SLUB=y rather than
CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
use of each CPU's workqueues to run its cache_reap()
@@ -186,6 +256,7 @@ To reduce its OS jitter, do any of the following:
be able to build your kernel with CONFIG_CPU_FREQ=n to
avoid the CPU-frequency governor periodically running
on each CPU, including cs_dbs_timer() and od_dbs_timer().
+
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
d. As of v3.18, Christoph Lameter's on-demand vmstat workers
@@ -222,9 +293,14 @@ To reduce its OS jitter, do any of the following:
CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
avoiding OS jitter from rackmeter_do_timer().
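
Returning to item 2 in the list above: a sysfs-visible workqueue is
simply one allocated with the WQ_SYSFS flag, for example (the workqueue
name here is hypothetical)::

    /* its cpumask then appears under
     * /sys/devices/virtual/workqueue/foo_wq/cpumask */
    struct workqueue_struct *foo_wq;

    foo_wq = alloc_workqueue("foo_wq", WQ_SYSFS, 0);
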
-Name: rcuc/%u
-Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+Name:
+ rcuc/%u
+
+Purpose:
+ Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+
To reduce its OS jitter, do at least one of the following:
+
1. Build the kernel with CONFIG_PREEMPT=n. This prevents these
kthreads from being created in the first place, and also obviates
the need for RCU priority boosting. This approach is feasible
@@ -244,9 +320,14 @@ To reduce its OS jitter, do at least one of the following:
CPU, again preventing the rcuc/%u kthreads from having any work
to do.
-Name: rcuob/%d, rcuop/%d, and rcuos/%d
-Purpose: Offload RCU callbacks from the corresponding CPU.
+Name:
+ rcuob/%d, rcuop/%d, and rcuos/%d
+
+Purpose:
+ Offload RCU callbacks from the corresponding CPU.
+
To reduce its OS jitter, do at least one of the following:
+
1. Use affinity, cgroups, or other mechanism to force these kthreads
to execute on some other CPU.
2. Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these
@@ -254,9 +335,14 @@ To reduce its OS jitter, do at least one of the following:
note that this will not eliminate OS jitter, but will instead
shift it to RCU_SOFTIRQ.
-Name: watchdog/%u
-Purpose: Detect software lockups on each CPU.
+Name:
+ watchdog/%u
+
+Purpose:
+ Detect software lockups on each CPU.
+
To reduce its OS jitter, do at least one of the following:
+
1. Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
kthreads from being created in the first place.
2. Boot with "nosoftlockup", which will also prevent these kthreads
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index 1be59a3a521c..fc9485d79061 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -1,13 +1,13 @@
+=====================================================================
Everything you never wanted to know about kobjects, ksets, and ktypes
+=====================================================================
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+:Author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+:Last updated: December 19, 2007
Based on an original article by Jon Corbet for lwn.net written October 1,
2003 and located at http://lwn.net/Articles/51437/
-Last updated December 19, 2007
-
-
Part of the difficulty in understanding the driver model - and the kobject
abstraction upon which it is built - is that there is no obvious starting
place. Dealing with kobjects requires understanding a few different types,
@@ -47,6 +47,7 @@ approach will be taken, so we'll go back to kobjects.
Embedding kobjects
+==================
It is rare for kernel code to create a standalone kobject, with one major
exception explained below. Instead, kobjects are used to control access to
@@ -65,7 +66,7 @@ their own, but are invariably found embedded in the larger objects of
interest.)
So, for example, the UIO code in drivers/uio/uio.c has a structure that
-defines the memory region associated with a uio device:
+defines the memory region associated with a uio device::
struct uio_map {
struct kobject kobj;
@@ -77,7 +78,7 @@ just a matter of using the kobj member. Code that works with kobjects will
often have the opposite problem, however: given a struct kobject pointer,
what is the pointer to the containing structure? You must avoid tricks
(such as assuming that the kobject is at the beginning of the structure)
-and, instead, use the container_of() macro, found in <linux/kernel.h>:
+and, instead, use the container_of() macro, found in <linux/kernel.h>::
container_of(pointer, type, member)
@@ -90,13 +91,13 @@ where:
The return value from container_of() is a pointer to the corresponding
container type. So, for example, a pointer "kp" to a struct kobject
embedded *within* a struct uio_map could be converted to a pointer to the
-*containing* uio_map structure with:
+*containing* uio_map structure with::
struct uio_map *u_map = container_of(kp, struct uio_map, kobj);
For convenience, programmers often define a simple macro for "back-casting"
kobject pointers to the containing type. Exactly this happens in the
-earlier drivers/uio/uio.c, as you can see here:
+earlier drivers/uio/uio.c, as you can see here::
struct uio_map {
struct kobject kobj;
@@ -106,23 +107,25 @@ earlier drivers/uio/uio.c, as you can see here:
#define to_map(map) container_of(map, struct uio_map, kobj)
where the macro argument "map" is a pointer to the struct kobject in
-question. That macro is subsequently invoked with:
+question. That macro is subsequently invoked with::
struct uio_map *map = to_map(kobj);
Initialization of kobjects
+==========================
Code which creates a kobject must, of course, initialize that object. Some
-of the internal fields are setup with a (mandatory) call to kobject_init():
+of the internal fields are set up with a (mandatory) call to kobject_init()::
void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
The ktype is required for a kobject to be created properly, as every kobject
must have an associated kobj_type. After calling kobject_init(), to
-register the kobject with sysfs, the function kobject_add() must be called:
+register the kobject with sysfs, the function kobject_add() must be called::
- int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...);
+ int kobject_add(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, ...);
This sets up the parent of the kobject and the name for the kobject
properly. If the kobject is to be associated with a specific kset,
@@ -133,7 +136,7 @@ kset itself.
As the name of the kobject is set when it is added to the kernel, the name
of the kobject should never be manipulated directly. If you must change
-the name of the kobject, call kobject_rename():
+the name of the kobject, call kobject_rename()::
int kobject_rename(struct kobject *kobj, const char *new_name);
@@ -146,12 +149,12 @@ is being removed. If your code needs to call this function, it is
incorrect and needs to be fixed.
To properly access the name of the kobject, use the function
-kobject_name():
+kobject_name()::
const char *kobject_name(const struct kobject * kobj);
There is a helper function to both initialize and add the kobject to the
-kernel at the same time, called surprisingly enough kobject_init_and_add():
+kernel at the same time, called surprisingly enough kobject_init_and_add()::
int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
struct kobject *parent, const char *fmt, ...);
@@ -161,10 +164,11 @@ kobject_add() functions described above.
Uevents
+=======
After a kobject has been registered with the kobject core, you need to
announce to the world that it has been created. This can be done with a
-call to kobject_uevent():
+call to kobject_uevent()::
int kobject_uevent(struct kobject *kobj, enum kobject_action action);
@@ -180,11 +184,12 @@ hand.
Reference counts
+================
One of the key functions of a kobject is to serve as a reference counter
for the object in which it is embedded. As long as references to the object
exist, the object (and the code which supports it) must continue to exist.
-The low-level functions for manipulating a kobject's reference counts are:
+The low-level functions for manipulating a kobject's reference counts are::
struct kobject *kobject_get(struct kobject *kobj);
void kobject_put(struct kobject *kobj);
@@ -209,21 +214,24 @@ file Documentation/kref.txt in the Linux kernel source tree.
Creating "simple" kobjects
+==========================
Sometimes all that a developer wants is a way to create a simple directory
in the sysfs hierarchy, and not have to mess with the whole complication of
ksets, show and store functions, and other details. This is the one
exception where a single kobject should be created. To create such an
-entry, use the function:
+entry, use the function::
struct kobject *kobject_create_and_add(char *name, struct kobject *parent);
This function will create a kobject and place it in sysfs in the location
underneath the specified parent kobject. To create simple attributes
-associated with this kobject, use:
+associated with this kobject, use::
int sysfs_create_file(struct kobject *kobj, struct attribute *attr);
-or
+
+or::
+
int sysfs_create_group(struct kobject *kobj, struct attribute_group *grp);
Both types of attributes used here, with a kobject that has been created
@@ -236,6 +244,7 @@ implementation of a simple kobject and attributes.
ktypes and release methods
+==========================
One important thing still missing from the discussion is what happens to a
kobject when its reference count reaches zero. The code which created the
@@ -257,7 +266,7 @@ is good practice to always use kobject_put() after kobject_init() to avoid
errors creeping in.
This notification is done through a kobject's release() method. Usually
-such a method has a form like:
+such a method has a form like::
void my_object_release(struct kobject *kobj)
{
@@ -281,7 +290,7 @@ leak in the kobject core, which makes people unhappy.
Interestingly, the release() method is not stored in the kobject itself;
instead, it is associated with the ktype. So let us introduce struct
-kobj_type:
+kobj_type::
struct kobj_type {
void (*release)(struct kobject *kobj);
@@ -306,6 +315,7 @@ automatically created for any kobject that is registered with this ktype.
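
Putting these pieces together, a minimal sketch of an object with an
embedded kobject, a ktype with a release() method, and the
creation/teardown path could look like this (``struct foo`` and its
names are hypothetical; sysfs attributes are omitted)::

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct foo {
        struct kobject kobj;
        int data;
    };
    #define to_foo(x) container_of(x, struct foo, kobj)

    static void foo_release(struct kobject *kobj)
    {
        kfree(to_foo(kobj));    /* last reference is gone: free it */
    }

    static struct kobj_type foo_ktype = {
        .release = foo_release,
    };

    static int foo_create(void)
    {
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
        int ret;

        if (!f)
            return -ENOMEM;
        ret = kobject_init_and_add(&f->kobj, &foo_ktype, NULL, "foo");
        if (ret)
            kobject_put(&f->kobj);    /* triggers foo_release() */
        return ret;
    }
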
ksets
+=====
A kset is merely a collection of kobjects that want to be associated with
each other. There is no restriction that they be of the same ktype, but be
@@ -335,13 +345,16 @@ kobject) in their parent.
As a kset contains a kobject within it, it should always be dynamically
created and never declared statically or on the stack. To create a new
-kset use:
+kset use::
+
struct kset *kset_create_and_add(const char *name,
struct kset_uevent_ops *u,
struct kobject *parent);
-When you are finished with the kset, call:
+When you are finished with the kset, call::
+
void kset_unregister(struct kset *kset);
+
to destroy it. This removes the kset from sysfs and decrements its reference
count. When the reference count goes to zero, the kset will be released.
Because other references to the kset may still exist, the release may happen
@@ -351,14 +364,14 @@ An example of using a kset can be seen in the
samples/kobject/kset-example.c file in the kernel tree.
If a kset wishes to control the uevent operations of the kobjects
-associated with it, it can use the struct kset_uevent_ops to handle it:
+associated with it, it can use the struct kset_uevent_ops to handle it::
-struct kset_uevent_ops {
+ struct kset_uevent_ops {
int (*filter)(struct kset *kset, struct kobject *kobj);
const char *(*name)(struct kset *kset, struct kobject *kobj);
int (*uevent)(struct kset *kset, struct kobject *kobj,
struct kobj_uevent_env *env);
-};
+ };
The filter function allows a kset to prevent a uevent from being emitted to
@@ -386,6 +399,7 @@ added below the parent kobject.
Kobject removal
+===============
After a kobject has been registered with the kobject core successfully, it
must be cleaned up when the code is finished with it. To do that, call
@@ -409,6 +423,7 @@ called, and the objects in the former circle release each other.
Example code to copy from
+=========================
For a more complete example of using ksets and kobjects properly, see the
example programs samples/kobject/{kobject-example.c,kset-example.c},
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1f6d45abfe42..2335715bf471 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -1,30 +1,36 @@
-Title : Kernel Probes (Kprobes)
-Authors : Jim Keniston <jkenisto@us.ibm.com>
- : Prasanna S Panchamukhi <prasanna.panchamukhi@gmail.com>
- : Masami Hiramatsu <mhiramat@redhat.com>
-
-CONTENTS
-
-1. Concepts: Kprobes, Jprobes, Return Probes
-2. Architectures Supported
-3. Configuring Kprobes
-4. API Reference
-5. Kprobes Features and Limitations
-6. Probe Overhead
-7. TODO
-8. Kprobes Example
-9. Jprobes Example
-10. Kretprobes Example
-Appendix A: The kprobes debugfs interface
-Appendix B: The kprobes sysctl interface
-
-1. Concepts: Kprobes, Jprobes, Return Probes
+=======================
+Kernel Probes (Kprobes)
+=======================
+
+:Author: Jim Keniston <jkenisto@us.ibm.com>
+:Author: Prasanna S Panchamukhi <prasanna.panchamukhi@gmail.com>
+:Author: Masami Hiramatsu <mhiramat@redhat.com>
+
+.. CONTENTS
+
+ 1. Concepts: Kprobes, Jprobes, Return Probes
+ 2. Architectures Supported
+ 3. Configuring Kprobes
+ 4. API Reference
+ 5. Kprobes Features and Limitations
+ 6. Probe Overhead
+ 7. TODO
+ 8. Kprobes Example
+ 9. Jprobes Example
+ 10. Kretprobes Example
+ Appendix A: The kprobes debugfs interface
+ Appendix B: The kprobes sysctl interface
+
+Concepts: Kprobes, Jprobes, Return Probes
+=========================================
Kprobes enables you to dynamically break into any kernel routine and
collect debugging and performance information non-disruptively. You
-can trap at almost any kernel code address(*), specifying a handler
+can trap at almost any kernel code address [1]_, specifying a handler
routine to be invoked when the breakpoint is hit.
-(*: some parts of the kernel code can not be trapped, see 1.5 Blacklist)
+
+.. [1] some parts of the kernel code can not be trapped, see
+ :ref:`kprobes_blacklist`)
There are currently three types of probes: kprobes, jprobes, and
kretprobes (also called return probes). A kprobe can be inserted
@@ -40,8 +46,8 @@ registration function such as register_kprobe() specifies where
the probe is to be inserted and what handler is to be called when
the probe is hit.
-There are also register_/unregister_*probes() functions for batch
-registration/unregistration of a group of *probes. These functions
+There are also ``register_/unregister_*probes()`` functions for batch
+registration/unregistration of a group of ``*probes``. These functions
can speed up unregistration process when you have to unregister
a lot of probes at once.
@@ -51,9 +57,10 @@ things that you'll need to know in order to make the best use of
Kprobes -- e.g., the difference between a pre_handler and
a post_handler, and how to use the maxactive and nmissed fields of
a kretprobe. But if you're in a hurry to start using Kprobes, you
-can skip ahead to section 2.
+can skip ahead to :ref:`kprobes_archs_supported`.
-1.1 How Does a Kprobe Work?
+How Does a Kprobe Work?
+-----------------------
When a kprobe is registered, Kprobes makes a copy of the probed
instruction and replaces the first byte(s) of the probed instruction
@@ -75,7 +82,8 @@ After the instruction is single-stepped, Kprobes executes the
"post_handler," if any, that is associated with the kprobe.
Execution then continues with the instruction following the probepoint.
-1.2 How Does a Jprobe Work?
+How Does a Jprobe Work?
+-----------------------
A jprobe is implemented using a kprobe that is placed on a function's
entry point. It employs a simple mirroring principle to allow
@@ -113,9 +121,11 @@ more than eight function arguments, an argument of more than sixteen
bytes, or more than 64 bytes of argument data, depending on
architecture).
-1.3 Return Probes
+Return Probes
+-------------
-1.3.1 How Does a Return Probe Work?
+How Does a Return Probe Work?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When you call register_kretprobe(), Kprobes establishes a kprobe at
the entry to the function. When the probed function is called and this
@@ -150,7 +160,8 @@ zero when the return probe is registered, and is incremented every
time the probed function is entered but there is no kretprobe_instance
object available for establishing the return probe.
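
As a rough sketch of the API described above (the probed symbol is
illustrative; module boilerplate is omitted)::

    static int ret_handler(struct kretprobe_instance *ri,
                           struct pt_regs *regs)
    {
        pr_info("probed function returned %lu\n",
                regs_return_value(regs));
        return 0;
    }

    static struct kretprobe rp = {
        .handler   = ret_handler,
        .maxactive = 20,    /* up to 20 concurrent invocations tracked */
    };

    /* in module init: */
    rp.kp.symbol_name = "do_fork";    /* illustrative probe point */
    ret = register_kretprobe(&rp);
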
-1.3.2 Kretprobe entry-handler
+Kretprobe entry-handler
+^^^^^^^^^^^^^^^^^^^^^^^
Kretprobes also provides an optional user-specified handler which runs
on function entry. This handler is specified by setting the entry_handler
@@ -174,7 +185,10 @@ In case probed function is entered but there is no kretprobe_instance
object available, then in addition to incrementing the nmissed count,
the user entry_handler invocation is also skipped.
-1.4 How Does Jump Optimization Work?
+.. _kprobes_jump_optimization:
+
+How Does Jump Optimization Work?
+--------------------------------
If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
is automatically set to 'y' on x86/x86-64 non-preemptive kernels) and
@@ -182,53 +196,60 @@ the "debug.kprobes_optimization" kernel parameter is set to 1 (see
sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
instruction instead of a breakpoint instruction at each probepoint.
-1.4.1 Init a Kprobe
+Init a Kprobe
+^^^^^^^^^^^^^
When a probe is registered, before attempting this optimization,
Kprobes inserts an ordinary, breakpoint-based kprobe at the specified
address. So, even if it's not possible to optimize this particular
probepoint, there'll be a probe there.
-1.4.2 Safety Check
+Safety Check
+^^^^^^^^^^^^
Before optimizing a probe, Kprobes performs the following safety checks:
- Kprobes verifies that the region that will be replaced by the jump
-instruction (the "optimized region") lies entirely within one function.
-(A jump instruction is multiple bytes, and so may overlay multiple
-instructions.)
+ instruction (the "optimized region") lies entirely within one function.
+ (A jump instruction is multiple bytes, and so may overlay multiple
+ instructions.)
- Kprobes analyzes the entire function and verifies that there is no
-jump into the optimized region. Specifically:
+ jump into the optimized region. Specifically:
+
- the function contains no indirect jump;
- the function contains no instruction that causes an exception (since
- the fixup code triggered by the exception could jump back into the
- optimized region -- Kprobes checks the exception tables to verify this);
- and
+ the fixup code triggered by the exception could jump back into the
+ optimized region -- Kprobes checks the exception tables to verify this);
- there is no near jump to the optimized region (other than to the first
- byte).
+ byte).
- For each instruction in the optimized region, Kprobes verifies that
-the instruction can be executed out of line.
+ the instruction can be executed out of line.
-1.4.3 Preparing Detour Buffer
+Preparing Detour Buffer
+^^^^^^^^^^^^^^^^^^^^^^^
Next, Kprobes prepares a "detour" buffer, which contains the following
instruction sequence:
+
- code to push the CPU's registers (emulating a breakpoint trap)
- a call to the trampoline code which calls user's probe handlers.
- code to restore registers
- the instructions from the optimized region
- a jump back to the original execution path.
-1.4.4 Pre-optimization
+Pre-optimization
+^^^^^^^^^^^^^^^^
After preparing the detour buffer, Kprobes verifies that none of the
following situations exist:
+
- The probe has either a break_handler (i.e., it's a jprobe) or a
-post_handler.
+ post_handler.
- Other instructions in the optimized region are probed.
- The probe is disabled.
+
In any of the above cases, Kprobes won't start optimizing the probe.
Since these are temporary situations, Kprobes tries to start
optimizing it again if the situation is changed.
@@ -240,21 +261,23 @@ Kprobes returns control to the original instruction path by setting
the CPU's instruction pointer to the copied code in the detour buffer
-- thus at least avoiding the single-step.
-1.4.5 Optimization
+Optimization
+^^^^^^^^^^^^
The Kprobe-optimizer doesn't insert the jump instruction immediately;
rather, it calls synchronize_sched() for safety first, because it's
possible for a CPU to be interrupted in the middle of executing the
-optimized region(*). As you know, synchronize_sched() can ensure
+optimized region [3]_. As you know, synchronize_sched() can ensure
that all interruptions that were active when synchronize_sched()
was called are done, but only if CONFIG_PREEMPT=n. So, this version
-of kprobe optimization supports only kernels with CONFIG_PREEMPT=n.(**)
+of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_.
After that, the Kprobe-optimizer calls stop_machine() to replace
the optimized region with a jump instruction to the detour buffer,
using text_poke_smp().
-1.4.6 Unoptimization
+Unoptimization
+^^^^^^^^^^^^^^
When an optimized kprobe is unregistered, disabled, or blocked by
another kprobe, it will be unoptimized. If this happens before
@@ -263,15 +286,15 @@ optimized list. If the optimization has been done, the jump is
replaced with the original code (except for an int3 breakpoint in
the first byte) by using text_poke_smp().
-(*)Please imagine that the 2nd instruction is interrupted and then
-the optimizer replaces the 2nd instruction with the jump *address*
-while the interrupt handler is running. When the interrupt
-returns to original address, there is no valid instruction,
-and it causes an unexpected result.
+.. [3] Please imagine that the 2nd instruction is interrupted and then
+ the optimizer replaces the 2nd instruction with the jump *address*
+ while the interrupt handler is running. When the interrupt
+ returns to original address, there is no valid instruction,
+ and it causes an unexpected result.
-(**)This optimization-safety checking may be replaced with the
-stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y
-kernel.
+.. [4] This optimization-safety checking may be replaced with the
+ stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y
+ kernel.
NOTE for geeks:
The jump optimization changes the kprobe's pre_handler behavior.
@@ -280,11 +303,17 @@ path by changing regs->ip and returning 1. However, when the probe
is optimized, that modification is ignored. Thus, if you want to
tweak the kernel's execution path, you need to suppress optimization,
using one of the following techniques:
+
- Specify an empty function for the kprobe's post_handler or break_handler.
- or
+
+or
+
- Execute 'sysctl -w debug.kprobes_optimization=n'
-1.5 Blacklist
+.. _kprobes_blacklist:
+
+Blacklist
+---------
Kprobes can probe most of the kernel except itself. This means
that there are some functions where kprobes cannot probe. Probing
@@ -297,7 +326,10 @@ to specify a blacklisted function.
Kprobes checks the given probe address against the blacklist and
rejects registering it if the given address is in the blacklist.
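+
+For illustration, a minimal sketch (assuming the NOKPROBE_SYMBOL() macro
+from include/linux/kprobes.h) of how a function is added to the blacklist::
+
+  static int critical_helper(void *data)
+  {
+          /* code used by the kprobes exception path itself,
+           * so it must never be probed */
+          return 0;
+  }
+  NOKPROBE_SYMBOL(critical_helper);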
-2. Architectures Supported
+.. _kprobes_archs_supported:
+
+Architectures Supported
+=======================
Kprobes, jprobes, and return probes are implemented on the following
architectures:
@@ -312,7 +344,8 @@ architectures:
- mips
- s390
-3. Configuring Kprobes
+Configuring Kprobes
+===================
When configuring the kernel using make menuconfig/xconfig/oldconfig,
ensure that CONFIG_KPROBES is set to "y". Under "General setup", look
@@ -331,7 +364,8 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
so you can use "objdump -d -l vmlinux" to see the source-to-object
code mapping.
-4. API Reference
+API Reference
+=============
The Kprobes API includes a "register" function and an "unregister"
function for each type of probe. The API also includes "register_*probes"
@@ -340,10 +374,13 @@ Here are terse, mini-man-page specifications for these functions and
the associated probe handlers that you'll write. See the files in the
samples/kprobes/ sub-directory for examples.
-4.1 register_kprobe
+register_kprobe
+---------------
+
+::
-#include <linux/kprobes.h>
-int register_kprobe(struct kprobe *kp);
+ #include <linux/kprobes.h>
+ int register_kprobe(struct kprobe *kp);
Sets a breakpoint at the address kp->addr. When the breakpoint is
hit, Kprobes calls kp->pre_handler. After the probed instruction
@@ -354,61 +391,68 @@ kp->fault_handler. Any or all handlers can be NULL. If kp->flags
has KPROBE_FLAG_DISABLED set, that kp will be registered but disabled,
so its handlers aren't hit until enable_kprobe(kp) is called.
-NOTE:
-1. With the introduction of the "symbol_name" field to struct kprobe,
-the probepoint address resolution will now be taken care of by the kernel.
-The following will now work:
+.. note::
+
+ 1. With the introduction of the "symbol_name" field to struct kprobe,
+ the probepoint address resolution will now be taken care of by the kernel.
+ The following will now work::
kp.symbol_name = "symbol_name";
-(64-bit powerpc intricacies such as function descriptors are handled
-transparently)
+ (64-bit powerpc intricacies such as function descriptors are handled
+ transparently)
-2. Use the "offset" field of struct kprobe if the offset into the symbol
-to install a probepoint is known. This field is used to calculate the
-probepoint.
+ 2. Use the "offset" field of struct kprobe if the offset into the symbol
+ to install a probepoint is known. This field is used to calculate the
+ probepoint.
-3. Specify either the kprobe "symbol_name" OR the "addr". If both are
-specified, kprobe registration will fail with -EINVAL.
+ 3. Specify either the kprobe "symbol_name" OR the "addr". If both are
+ specified, kprobe registration will fail with -EINVAL.
-4. With CISC architectures (such as i386 and x86_64), the kprobes code
-does not validate if the kprobe.addr is at an instruction boundary.
-Use "offset" with caution.
+ 4. With CISC architectures (such as i386 and x86_64), the kprobes code
+ does not validate if the kprobe.addr is at an instruction boundary.
+ Use "offset" with caution.
register_kprobe() returns 0 on success, or a negative errno otherwise.
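+
+As a minimal registration sketch (the handler body and the probed symbol
+are illustrative; see samples/kprobes/kprobe_example.c for a complete,
+buildable version)::
+
+  static int my_pre(struct kprobe *p, struct pt_regs *regs)
+  {
+          pr_info("%s hit at %p\n", p->symbol_name, p->addr);
+          return 0;       /* let the probed instruction execute */
+  }
+
+  static struct kprobe kp = {
+          .symbol_name = "do_fork",  /* resolved by the kernel */
+          .pre_handler = my_pre,
+  };
+
+  /* in module init */
+  ret = register_kprobe(&kp);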
-User's pre-handler (kp->pre_handler):
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-int pre_handler(struct kprobe *p, struct pt_regs *regs);
+User's pre-handler (kp->pre_handler)::
+
+ #include <linux/kprobes.h>
+ #include <linux/ptrace.h>
+ int pre_handler(struct kprobe *p, struct pt_regs *regs);
Called with p pointing to the kprobe associated with the breakpoint,
and regs pointing to the struct containing the registers saved when
the breakpoint was hit. Return 0 here unless you're a Kprobes geek.
-User's post-handler (kp->post_handler):
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-void post_handler(struct kprobe *p, struct pt_regs *regs,
- unsigned long flags);
+User's post-handler (kp->post_handler)::
+
+ #include <linux/kprobes.h>
+ #include <linux/ptrace.h>
+ void post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags);
p and regs are as described for the pre_handler. flags always seems
to be zero.
-User's fault-handler (kp->fault_handler):
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr);
+User's fault-handler (kp->fault_handler)::
+
+ #include <linux/kprobes.h>
+ #include <linux/ptrace.h>
+ int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr);
p and regs are as described for the pre_handler. trapnr is the
architecture-specific trap number associated with the fault (e.g.,
on i386, 13 for a general protection fault or 14 for a page fault).
Returns 1 if it successfully handled the exception.
-4.2 register_jprobe
+register_jprobe
+---------------
-#include <linux/kprobes.h>
-int register_jprobe(struct jprobe *jp)
+::
+
+ #include <linux/kprobes.h>
+ int register_jprobe(struct jprobe *jp)
Sets a breakpoint at the address jp->kp.addr, which must be the address
of the first instruction of a function. When the breakpoint is hit,
@@ -423,10 +467,13 @@ declaration must match.
register_jprobe() returns 0 on success, or a negative errno otherwise.
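+
+A sketch mirroring samples/kprobes/jprobe_example.c (the handler's
+signature must match the probed function, here do_fork, and the handler
+must end with jprobe_return())::
+
+  static long my_do_fork(unsigned long clone_flags, unsigned long stack_start,
+                         unsigned long stack_size, int __user *parent_tidptr,
+                         int __user *child_tidptr)
+  {
+          pr_info("clone_flags = 0x%lx\n", clone_flags);
+          jprobe_return();  /* mandatory: hands control back to Kprobes */
+          return 0;         /* never reached */
+  }
+
+  static struct jprobe jp = {
+          .entry = my_do_fork,
+          .kp.symbol_name = "do_fork",
+  };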
-4.3 register_kretprobe
+register_kretprobe
+------------------
+
+::
-#include <linux/kprobes.h>
-int register_kretprobe(struct kretprobe *rp);
+ #include <linux/kprobes.h>
+ int register_kretprobe(struct kretprobe *rp);
Establishes a return probe for the function whose address is
rp->kp.addr. When that function returns, Kprobes calls rp->handler.
@@ -436,14 +483,17 @@ register_kretprobe(); see "How Does a Return Probe Work?" for details.
register_kretprobe() returns 0 on success, or a negative errno
otherwise.
-User's return-probe handler (rp->handler):
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-int kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs);
+User's return-probe handler (rp->handler)::
+
+ #include <linux/kprobes.h>
+ #include <linux/ptrace.h>
+ int kretprobe_handler(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
regs is as described for kprobe.pre_handler. ri points to the
kretprobe_instance object, of which the following fields may be
of interest:
+
- ret_addr: the return address
- rp: points to the corresponding kretprobe object
- task: points to the corresponding task struct
@@ -456,74 +506,94 @@ the architecture's ABI.
The handler's return value is currently ignored.
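+
+A sketch of a return-probe handler (regs_return_value() is the
+arch-independent accessor for the value being returned)::
+
+  static int ret_handler(struct kretprobe_instance *ri,
+                         struct pt_regs *regs)
+  {
+          unsigned long retval = regs_return_value(regs);
+
+          pr_info("%s returned %lu\n", ri->rp->kp.symbol_name, retval);
+          return 0;  /* the return value is currently ignored */
+  }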
-4.4 unregister_*probe
+unregister_*probe
+------------------
+
+::
-#include <linux/kprobes.h>
-void unregister_kprobe(struct kprobe *kp);
-void unregister_jprobe(struct jprobe *jp);
-void unregister_kretprobe(struct kretprobe *rp);
+ #include <linux/kprobes.h>
+ void unregister_kprobe(struct kprobe *kp);
+ void unregister_jprobe(struct jprobe *jp);
+ void unregister_kretprobe(struct kretprobe *rp);
Removes the specified probe. The unregister function can be called
at any time after the probe has been registered.
-NOTE:
-If the functions find an incorrect probe (ex. an unregistered probe),
-they clear the addr field of the probe.
+.. note::
+
+ If the functions find an incorrect probe (e.g., an unregistered probe),
+ they clear the addr field of the probe.
+
+register_*probes
+----------------
-4.5 register_*probes
+::
-#include <linux/kprobes.h>
-int register_kprobes(struct kprobe **kps, int num);
-int register_kretprobes(struct kretprobe **rps, int num);
-int register_jprobes(struct jprobe **jps, int num);
+ #include <linux/kprobes.h>
+ int register_kprobes(struct kprobe **kps, int num);
+ int register_kretprobes(struct kretprobe **rps, int num);
+ int register_jprobes(struct jprobe **jps, int num);
Registers each of the num probes in the specified array. If any
error occurs during registration, all probes in the array, up to
the bad probe, are safely unregistered before the register_*probes
function returns.
-- kps/rps/jps: an array of pointers to *probe data structures
+
+- kps/rps/jps: an array of pointers to ``*probe`` data structures
- num: the number of the array entries.
-NOTE:
-You have to allocate(or define) an array of pointers and set all
-of the array entries before using these functions.
+.. note::
+
+ You have to allocate (or define) an array of pointers and set all
+ of the array entries before using these functions.
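+
+For example (a sketch; kp1 and kp2 are assumed to be fully initialized
+kprobes as in the register_kprobe() example above)::
+
+  struct kprobe *kps[2] = { &kp1, &kp2 };
+
+  ret = register_kprobes(kps, 2);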
-4.6 unregister_*probes
+unregister_*probes
+------------------
-#include <linux/kprobes.h>
-void unregister_kprobes(struct kprobe **kps, int num);
-void unregister_kretprobes(struct kretprobe **rps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
+::
+
+ #include <linux/kprobes.h>
+ void unregister_kprobes(struct kprobe **kps, int num);
+ void unregister_kretprobes(struct kretprobe **rps, int num);
+ void unregister_jprobes(struct jprobe **jps, int num);
Removes each of the num probes in the specified array at once.
-NOTE:
-If the functions find some incorrect probes (ex. unregistered
-probes) in the specified array, they clear the addr field of those
-incorrect probes. However, other probes in the array are
-unregistered correctly.
+.. note::
+
+ If the functions find some incorrect probes (e.g., unregistered
+ probes) in the specified array, they clear the addr field of those
+ incorrect probes. However, other probes in the array are
+ unregistered correctly.
-4.7 disable_*probe
+disable_*probe
+--------------
-#include <linux/kprobes.h>
-int disable_kprobe(struct kprobe *kp);
-int disable_kretprobe(struct kretprobe *rp);
-int disable_jprobe(struct jprobe *jp);
+::
-Temporarily disables the specified *probe. You can enable it again by using
+ #include <linux/kprobes.h>
+ int disable_kprobe(struct kprobe *kp);
+ int disable_kretprobe(struct kretprobe *rp);
+ int disable_jprobe(struct jprobe *jp);
+
+Temporarily disables the specified ``*probe``. You can enable it again by using
enable_*probe(). You must specify the probe which has been registered.
-4.8 enable_*probe
+enable_*probe
+-------------
+
+::
-#include <linux/kprobes.h>
-int enable_kprobe(struct kprobe *kp);
-int enable_kretprobe(struct kretprobe *rp);
-int enable_jprobe(struct jprobe *jp);
+ #include <linux/kprobes.h>
+ int enable_kprobe(struct kprobe *kp);
+ int enable_kretprobe(struct kretprobe *rp);
+ int enable_jprobe(struct jprobe *jp);
-Enables *probe which has been disabled by disable_*probe(). You must specify
+Enables ``*probe`` which has been disabled by disable_*probe(). You must specify
the probe which has been registered.
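+
+For instance, a registered kprobe (kp, as in the earlier sketch) can be
+paused and later resumed::
+
+  disable_kprobe(&kp);  /* handlers no longer run */
+  /* ... section where probing is unwanted ... */
+  enable_kprobe(&kp);   /* probing resumes */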
-5. Kprobes Features and Limitations
+Kprobes Features and Limitations
+================================
Kprobes allows multiple probes at the same address. Currently,
however, there cannot be multiple jprobes on the same function at
@@ -538,7 +608,7 @@ are discussed in this section.
The register_*probe functions will return -EINVAL if you attempt
to install a probe in the code that implements Kprobes (mostly
-kernel/kprobes.c and arch/*/kernel/kprobes.c, but also functions such
+kernel/kprobes.c and ``arch/*/kernel/kprobes.c``, but also functions such
as do_page_fault and notifier_call_chain).
If you install a probe in an inline-able function, Kprobes makes
@@ -602,19 +672,21 @@ explain it, we introduce some terminology. Imagine a 3-instruction
sequence consisting of a two 2-byte instructions and one 3-byte
instruction.
- IA
- |
-[-2][-1][0][1][2][3][4][5][6][7]
- [ins1][ins2][ ins3 ]
- [<- DCR ->]
- [<- JTPR ->]
+::
-ins1: 1st Instruction
-ins2: 2nd Instruction
-ins3: 3rd Instruction
-IA: Insertion Address
-JTPR: Jump Target Prohibition Region
-DCR: Detoured Code Region
+ IA
+ |
+ [-2][-1][0][1][2][3][4][5][6][7]
+ [ins1][ins2][ ins3 ]
+ [<- DCR ->]
+ [<- JTPR ->]
+
+ ins1: 1st Instruction
+ ins2: 2nd Instruction
+ ins3: 3rd Instruction
+ IA: Insertion Address
+ JTPR: Jump Target Prohibition Region
+ DCR: Detoured Code Region
The instructions in DCR are copied to the out-of-line buffer
of the kprobe, because the bytes in DCR are replaced by
@@ -628,7 +700,8 @@ d) DCR must not straddle the border between functions.
Anyway, these limitations are checked by the in-kernel instruction
decoder, so you don't need to worry about that.
-6. Probe Overhead
+Probe Overhead
+==============
On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0
microseconds to process. Specifically, a benchmark that hits the same
@@ -638,70 +711,80 @@ return-probe hit typically takes 50-75% longer than a kprobe hit.
When you have a return probe set on a function, adding a kprobe at
the entry to that function adds essentially no overhead.
-Here are sample overhead figures (in usec) for different architectures.
-k = kprobe; j = jprobe; r = return probe; kr = kprobe + return probe
-on same function; jr = jprobe + return probe on same function
+Here are sample overhead figures (in usec) for different architectures::
+
+ k = kprobe; j = jprobe; r = return probe; kr = kprobe + return probe
+ on same function; jr = jprobe + return probe on same function
-i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips
-k = 0.57 usec; j = 1.00; r = 0.92; kr = 0.99; jr = 1.40
+ i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips
+ k = 0.57 usec; j = 1.00; r = 0.92; kr = 0.99; jr = 1.40
-x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips
-k = 0.49 usec; j = 0.76; r = 0.80; kr = 0.82; jr = 1.07
+ x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips
+ k = 0.49 usec; j = 0.76; r = 0.80; kr = 0.82; jr = 1.07
-ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU)
-k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
+ ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU)
+ k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
-6.1 Optimized Probe Overhead
+Optimized Probe Overhead
+------------------------
Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to
-process. Here are sample overhead figures (in usec) for x86 architectures.
-k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe,
-r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe.
+process. Here are sample overhead figures (in usec) for x86 architectures::
-i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
-k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33
+ k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe,
+ r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe.
-x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
-k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30
+ i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
+ k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33
-7. TODO
+ x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
+ k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30
+
+TODO
+====
a. SystemTap (http://sourceware.org/systemtap): Provides a simplified
-programming interface for probe-based instrumentation. Try it out.
+ programming interface for probe-based instrumentation. Try it out.
b. Kernel return probes for sparc64.
c. Support for other architectures.
d. User-space probes.
e. Watchpoint probes (which fire on data references).
-8. Kprobes Example
+Kprobes Example
+===============
See samples/kprobes/kprobe_example.c
-9. Jprobes Example
+Jprobes Example
+===============
See samples/kprobes/jprobe_example.c
-10. Kretprobes Example
+Kretprobes Example
+==================
See samples/kprobes/kretprobe_example.c
For additional information on Kprobes, refer to the following URLs:
-http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe
-http://www.redhat.com/magazine/005mar05/features/kprobes/
-http://www-users.cs.umn.edu/~boutcher/kprobes/
-http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115)
+
+- http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe
+- http://www.redhat.com/magazine/005mar05/features/kprobes/
+- http://www-users.cs.umn.edu/~boutcher/kprobes/
+- http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115)
-Appendix A: The kprobes debugfs interface
+The kprobes debugfs interface
+=============================
+
With recent kernels (> 2.6.20) the list of registered kprobes is visible
under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at /sys/kernel/debug).
-/sys/kernel/debug/kprobes/list: Lists all registered probes on the system
+/sys/kernel/debug/kprobes/list: Lists all registered probes on the system::
-c015d71a k vfs_read+0x0
-c011a316 j do_fork+0x0
-c03dedc5 r tcp_v4_rcv+0x0
+ c015d71a k vfs_read+0x0
+ c011a316 j do_fork+0x0
+ c03dedc5 r tcp_v4_rcv+0x0
The first column provides the kernel address where the probe is inserted.
The second column identifies the type of probe (k - kprobe, r - kretprobe
@@ -725,17 +808,19 @@ change each probe's disabling state. This means that disabled kprobes (marked
[DISABLED]) will be not enabled if you turn ON all kprobes by this knob.
-Appendix B: The kprobes sysctl interface
+The kprobes sysctl interface
+============================
/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF.
When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides
a knob to globally and forcibly turn jump optimization (see section
-1.4) ON or OFF. By default, jump optimization is allowed (ON).
-If you echo "0" to this file or set "debug.kprobes_optimization" to
-0 via sysctl, all optimized probes will be unoptimized, and any new
-probes registered after that will not be optimized. Note that this
-knob *changes* the optimized state. This means that optimized probes
-(marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be
+:ref:`kprobes_jump_optimization`) ON or OFF. By default, jump optimization
+is allowed (ON). If you echo "0" to this file or set
+"debug.kprobes_optimization" to 0 via sysctl, all optimized probes will be
+unoptimized, and any new probes registered after that will not be optimized.
+
+Note that this knob *changes* the optimized state. This means that optimized
+probes (marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be
removed). If the knob is turned on, they will be optimized again.
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index d26a27ca964d..3af384156d7e 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -1,24 +1,42 @@
+===================================================
+Adding reference counters (krefs) to kernel objects
+===================================================
+
+:Author: Corey Minyard <minyard@acm.org>
+:Author: Thomas Hellstrom <thellstrom@vmware.com>
+
+A lot of this was lifted from Greg Kroah-Hartman's 2004 OLS paper and
+presentation on krefs, which can be found at:
+
+ - http://www.kroah.com/linux/talks/ols_2004_kref_paper/Reprint-Kroah-Hartman-OLS2004.pdf
+ - http://www.kroah.com/linux/talks/ols_2004_kref_talk/
+
+Introduction
+============
krefs allow you to add reference counters to your objects. If you
have objects that are used in multiple places and passed around, and
you don't have refcounts, your code is almost certainly broken. If
you want refcounts, krefs are the way to go.
-To use a kref, add one to your data structures like:
+To use a kref, add one to your data structures like::
-struct my_data
-{
+ struct my_data
+ {
.
.
struct kref refcount;
.
.
-};
+ };
The kref can occur anywhere within the data structure.
+Initialization
+==============
+
You must initialize the kref after you allocate it. To do this, call
-kref_init as so:
+kref_init as so::
struct my_data *data;
@@ -29,18 +47,25 @@ kref_init as so:
This sets the refcount in the kref to 1.
+Kref rules
+==========
+
Once you have an initialized kref, you must follow the following
rules:
1) If you make a non-temporary copy of a pointer, especially if
it can be passed to another thread of execution, you must
- increment the refcount with kref_get() before passing it off:
+ increment the refcount with kref_get() before passing it off::
+
kref_get(&data->refcount);
+
If you already have a valid pointer to a kref-ed structure (the
refcount cannot go to zero) you may do this without a lock.
-2) When you are done with a pointer, you must call kref_put():
+2) When you are done with a pointer, you must call kref_put()::
+
kref_put(&data->refcount, data_release);
+
If this is the last reference to the pointer, the release
routine will be called. If the code never tries to get
a valid pointer to a kref-ed structure without already
@@ -53,25 +78,25 @@ rules:
structure must remain valid during the kref_get().
For example, if you allocate some data and then pass it to another
-thread to process:
+thread to process::
-void data_release(struct kref *ref)
-{
+ void data_release(struct kref *ref)
+ {
struct my_data *data = container_of(ref, struct my_data, refcount);
kfree(data);
-}
+ }
-void more_data_handling(void *cb_data)
-{
+ void more_data_handling(void *cb_data)
+ {
struct my_data *data = cb_data;
.
. do stuff with data here
.
kref_put(&data->refcount, data_release);
-}
+ }
-int my_data_handler(void)
-{
+ int my_data_handler(void)
+ {
int rv = 0;
struct my_data *data;
struct task_struct *task;
@@ -91,10 +116,10 @@ int my_data_handler(void)
.
. do stuff with data here
.
- out:
+ out:
kref_put(&data->refcount, data_release);
return rv;
-}
+ }
This way, it doesn't matter in what order the two threads handle the
data; the kref_put() handles knowing when the data is not referenced
@@ -104,7 +129,7 @@ put needs no lock because nothing tries to get the data without
already holding a pointer.
Note that the "before" in rule 1 is very important. You should never
-do something like:
+do something like::
task = kthread_run(more_data_handling, data, "more_data_handling");
if (task == ERR_PTR(-ENOMEM)) {
@@ -124,14 +149,14 @@ bad style. Don't do it.
There are some situations where you can optimize the gets and puts.
For instance, if you are done with an object and enqueuing it for
something else or passing it off to something else, there is no reason
-to do a get then a put:
+to do a get then a put::
/* Silly extra get and put */
kref_get(&obj->ref);
enqueue(obj);
kref_put(&obj->ref, obj_cleanup);
-Just do the enqueue. A comment about this is always welcome:
+Just do the enqueue. A comment about this is always welcome::
enqueue(obj);
/* We are done with obj, so we pass our refcount off
@@ -142,109 +167,99 @@ instance, you have a list of items that are each kref-ed, and you wish
to get the first one. You can't just pull the first item off the list
and kref_get() it. That violates rule 3 because you are not already
holding a valid pointer. You must add a mutex (or some other lock).
-For instance:
-
-static DEFINE_MUTEX(mutex);
-static LIST_HEAD(q);
-struct my_data
-{
- struct kref refcount;
- struct list_head link;
-};
-
-static struct my_data *get_entry()
-{
- struct my_data *entry = NULL;
- mutex_lock(&mutex);
- if (!list_empty(&q)) {
- entry = container_of(q.next, struct my_data, link);
- kref_get(&entry->refcount);
+For instance::
+
+ static DEFINE_MUTEX(mutex);
+ static LIST_HEAD(q);
+ struct my_data
+ {
+ struct kref refcount;
+ struct list_head link;
+ };
+
+ static struct my_data *get_entry()
+ {
+ struct my_data *entry = NULL;
+ mutex_lock(&mutex);
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ kref_get(&entry->refcount);
+ }
+ mutex_unlock(&mutex);
+ return entry;
}
- mutex_unlock(&mutex);
- return entry;
-}
-static void release_entry(struct kref *ref)
-{
- struct my_data *entry = container_of(ref, struct my_data, refcount);
+ static void release_entry(struct kref *ref)
+ {
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
- list_del(&entry->link);
- kfree(entry);
-}
+ list_del(&entry->link);
+ kfree(entry);
+ }
-static void put_entry(struct my_data *entry)
-{
- mutex_lock(&mutex);
- kref_put(&entry->refcount, release_entry);
- mutex_unlock(&mutex);
-}
+ static void put_entry(struct my_data *entry)
+ {
+ mutex_lock(&mutex);
+ kref_put(&entry->refcount, release_entry);
+ mutex_unlock(&mutex);
+ }
The kref_put() return value is useful if you do not want to hold the
lock during the whole release operation. Say you didn't want to call
kfree() with the lock held in the example above (since it is kind of
-pointless to do so). You could use kref_put() as follows:
+pointless to do so). You could use kref_put() as follows::
-static void release_entry(struct kref *ref)
-{
- /* All work is done after the return from kref_put(). */
-}
+ static void release_entry(struct kref *ref)
+ {
+ /* All work is done after the return from kref_put(). */
+ }
-static void put_entry(struct my_data *entry)
-{
- mutex_lock(&mutex);
- if (kref_put(&entry->refcount, release_entry)) {
- list_del(&entry->link);
- mutex_unlock(&mutex);
- kfree(entry);
- } else
- mutex_unlock(&mutex);
-}
+ static void put_entry(struct my_data *entry)
+ {
+ mutex_lock(&mutex);
+ if (kref_put(&entry->refcount, release_entry)) {
+ list_del(&entry->link);
+ mutex_unlock(&mutex);
+ kfree(entry);
+ } else
+ mutex_unlock(&mutex);
+ }
This is really more useful if you have to call other routines as part
of the free operations that could take a long time or might claim the
same lock. Note that doing everything in the release routine is still
preferred as it is a little neater.
-
-Corey Minyard <minyard@acm.org>
-
-A lot of this was lifted from Greg Kroah-Hartman's 2004 OLS paper and
-presentation on krefs, which can be found at:
- http://www.kroah.com/linux/talks/ols_2004_kref_paper/Reprint-Kroah-Hartman-OLS2004.pdf
-and:
- http://www.kroah.com/linux/talks/ols_2004_kref_talk/
-
-
The above example could also be optimized using kref_get_unless_zero() in
-the following way:
-
-static struct my_data *get_entry()
-{
- struct my_data *entry = NULL;
- mutex_lock(&mutex);
- if (!list_empty(&q)) {
- entry = container_of(q.next, struct my_data, link);
- if (!kref_get_unless_zero(&entry->refcount))
- entry = NULL;
+the following way::
+
+ static struct my_data *get_entry()
+ {
+ struct my_data *entry = NULL;
+ mutex_lock(&mutex);
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ mutex_unlock(&mutex);
+ return entry;
}
- mutex_unlock(&mutex);
- return entry;
-}
-static void release_entry(struct kref *ref)
-{
- struct my_data *entry = container_of(ref, struct my_data, refcount);
+ static void release_entry(struct kref *ref)
+ {
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
- mutex_lock(&mutex);
- list_del(&entry->link);
- mutex_unlock(&mutex);
- kfree(entry);
-}
+ mutex_lock(&mutex);
+ list_del(&entry->link);
+ mutex_unlock(&mutex);
+ kfree(entry);
+ }
-static void put_entry(struct my_data *entry)
-{
- kref_put(&entry->refcount, release_entry);
-}
+ static void put_entry(struct my_data *entry)
+ {
+ kref_put(&entry->refcount, release_entry);
+ }
This is useful for removing the mutex lock around kref_put() in put_entry(), but
it's important that kref_get_unless_zero is enclosed in the same critical
@@ -254,51 +269,51 @@ Note that it is illegal to use kref_get_unless_zero without checking its
return value. If you are sure (by already having a valid pointer) that
kref_get_unless_zero() will return true, then use kref_get() instead.
-The function kref_get_unless_zero also makes it possible to use rcu
-locking for lookups in the above example:
+Krefs and RCU
+=============
-struct my_data
-{
- struct rcu_head rhead;
- .
- struct kref refcount;
- .
- .
-};
-
-static struct my_data *get_entry_rcu()
-{
- struct my_data *entry = NULL;
- rcu_read_lock();
- if (!list_empty(&q)) {
- entry = container_of(q.next, struct my_data, link);
- if (!kref_get_unless_zero(&entry->refcount))
- entry = NULL;
+The function kref_get_unless_zero also makes it possible to use rcu
+locking for lookups in the above example::
+
+ struct my_data
+ {
+ struct rcu_head rhead;
+ .
+ struct kref refcount;
+ .
+ .
+ };
+
+ static struct my_data *get_entry_rcu()
+ {
+ struct my_data *entry = NULL;
+ rcu_read_lock();
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ rcu_read_unlock();
+ return entry;
}
- rcu_read_unlock();
- return entry;
-}
-static void release_entry_rcu(struct kref *ref)
-{
- struct my_data *entry = container_of(ref, struct my_data, refcount);
+ static void release_entry_rcu(struct kref *ref)
+ {
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
- mutex_lock(&mutex);
- list_del_rcu(&entry->link);
- mutex_unlock(&mutex);
- kfree_rcu(entry, rhead);
-}
+ mutex_lock(&mutex);
+ list_del_rcu(&entry->link);
+ mutex_unlock(&mutex);
+ kfree_rcu(entry, rhead);
+ }
-static void put_entry(struct my_data *entry)
-{
- kref_put(&entry->refcount, release_entry_rcu);
-}
+ static void put_entry(struct my_data *entry)
+ {
+ kref_put(&entry->refcount, release_entry_rcu);
+ }
But note that the struct kref member needs to remain in valid memory for an
rcu grace period after release_entry_rcu was called. That can be accomplished
by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
before using kfree, but note that synchronize_rcu() may sleep for a
substantial amount of time.
-
-
-Thomas Hellstrom <thellstrom@vmware.com>
diff --git a/Documentation/ldm.txt b/Documentation/ldm.txt
index 4f80edd14d0a..12c571368e73 100644
--- a/Documentation/ldm.txt
+++ b/Documentation/ldm.txt
@@ -1,9 +1,9 @@
+==========================================
+LDM - Logical Disk Manager (Dynamic Disks)
+==========================================
- LDM - Logical Disk Manager (Dynamic Disks)
- ------------------------------------------
-
-Originally Written by FlatCap - Richard Russon <ldm@flatcap.org>.
-Last Updated by Anton Altaparmakov on 30 March 2007 for Windows Vista.
+:Author: Originally Written by FlatCap - Richard Russon <ldm@flatcap.org>.
+:Last Updated: Anton Altaparmakov on 30 March 2007 for Windows Vista.
Overview
--------
@@ -37,24 +37,36 @@ Example
-------
Below we have a 50MiB disk, divided into seven partitions.
-N.B. The missing 1MiB at the end of the disk is where the LDM database is
- stored.
-
- Device | Offset Bytes Sectors MiB | Size Bytes Sectors MiB
- -------+----------------------------+---------------------------
- hda | 0 0 0 | 52428800 102400 50
- hda1 | 51380224 100352 49 | 1048576 2048 1
- hda2 | 16384 32 0 | 6979584 13632 6
- hda3 | 6995968 13664 6 | 10485760 20480 10
- hda4 | 17481728 34144 16 | 4194304 8192 4
- hda5 | 21676032 42336 20 | 5242880 10240 5
- hda6 | 26918912 52576 25 | 10485760 20480 10
- hda7 | 37404672 73056 35 | 13959168 27264 13
+
+.. note::
+
+ The missing 1MiB at the end of the disk is where the LDM database is
+ stored.
+
++-------++--------------+---------+-----++--------------+---------+----+
+|Device || Offset Bytes | Sectors | MiB || Size Bytes | Sectors | MiB|
++=======++==============+=========+=====++==============+=========+====+
+|hda || 0 | 0 | 0 || 52428800 | 102400 | 50|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda1 || 51380224 | 100352 | 49 || 1048576 | 2048 | 1|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda2 || 16384 | 32 | 0 || 6979584 | 13632 | 6|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda3 || 6995968 | 13664 | 6 || 10485760 | 20480 | 10|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda4 || 17481728 | 34144 | 16 || 4194304 | 8192 | 4|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda5 || 21676032 | 42336 | 20 || 5242880 | 10240 | 5|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda6 || 26918912 | 52576 | 25 || 10485760 | 20480 | 10|
++-------++--------------+---------+-----++--------------+---------+----+
+|hda7 || 37404672 | 73056 | 35 || 13959168 | 27264 | 13|
++-------++--------------+---------+-----++--------------+---------+----+
The LDM Database may not store the partitions in the order that they appear on
disk, but the driver will sort them.
-When Linux boots, you will see something like:
+When Linux boots, you will see something like::
hda: 102400 sectors w/32KiB Cache, CHS=50/64/32
hda: [LDM] hda1 hda2 hda3 hda4 hda5 hda6 hda7
@@ -65,13 +77,13 @@ Compiling LDM Support
To enable LDM, choose the following two options:
- "Advanced partition selection" CONFIG_PARTITION_ADVANCED
- "Windows Logical Disk Manager (Dynamic Disk) support" CONFIG_LDM_PARTITION
+ - "Advanced partition selection" CONFIG_PARTITION_ADVANCED
+ - "Windows Logical Disk Manager (Dynamic Disk) support" CONFIG_LDM_PARTITION
If you believe the driver isn't working as it should, you can enable the extra
debugging code. This will produce a LOT of output. The option is:
- "Windows LDM extra logging" CONFIG_LDM_DEBUG
+ - "Windows LDM extra logging" CONFIG_LDM_DEBUG
N.B. The partition code cannot be compiled as a module.
diff --git a/Documentation/lockup-watchdogs.txt b/Documentation/lockup-watchdogs.txt
index c8b8378513d6..290840c160af 100644
--- a/Documentation/lockup-watchdogs.txt
+++ b/Documentation/lockup-watchdogs.txt
@@ -30,7 +30,8 @@ timeout is set through the confusingly named "kernel.panic" sysctl),
to cause the system to reboot automatically after a specified amount
of time.
-=== Implementation ===
+Implementation
+==============
The soft and hard lockup detectors are built on top of the hrtimer and
perf subsystems, respectively. A direct consequence of this is that,
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
index 285c54f66779..6fa6a93d0949 100644
--- a/Documentation/lzo.txt
+++ b/Documentation/lzo.txt
@@ -1,8 +1,9 @@
-
+===========================================================
LZO stream format as understood by Linux's LZO decompressor
===========================================================
Introduction
+============
This is not a specification. No specification seems to be publicly available
for the LZO stream format. This document describes what input format the LZO
@@ -14,12 +15,13 @@ Introduction
for future bug reports.
Description
+===========
The stream is composed of a series of instructions, operands, and data. The
instructions consist of a few bits representing an opcode, and bits forming
the operands for the instruction, whose size and position depend on the
opcode and on the number of literals copied by previous instruction. The
- operands are used to indicate :
+ operands are used to indicate:
- a distance when copying data from the dictionary (past output buffer)
- a length (number of bytes to copy from dictionary)
@@ -38,7 +40,7 @@ Description
of bits in the operand. If the number of bits isn't enough to represent the
length, up to 255 may be added in increments by consuming more bytes with a
rate of at most 255 per extra byte (thus the compression ratio cannot exceed
- around 255:1). The variable length encoding using #bits is always the same :
+ around 255:1). The variable length encoding using #bits is always the same::
length = byte & ((1 << #bits) - 1)
if (!length) {
@@ -67,15 +69,19 @@ Description
instruction may encode this distance (0001HLLL), it takes one LE16 operand
for the distance, thus requiring 3 bytes.
- IMPORTANT NOTE : in the code some length checks are missing because certain
- instructions are called under the assumption that a certain number of bytes
- follow because it has already been guaranteed before parsing the instructions.
- They just have to "refill" this credit if they consume extra bytes. This is
- an implementation design choice independent on the algorithm or encoding.
+ .. important::
+
+ In the code some length checks are missing because certain instructions
+ are called under the assumption that a certain number of bytes follow
+ because it has already been guaranteed before parsing the instructions.
+ They just have to "refill" this credit if they consume extra bytes. This
+ is an implementation design choice independent of the algorithm or
+ encoding.
Byte sequences
+==============
- First byte encoding :
+ First byte encoding::
0..17 : follow regular instruction encoding, see below. It is worth
noting that codes 16 and 17 will represent a block copy from
@@ -91,7 +97,7 @@ Byte sequences
state = 4 [ don't copy extra literals ]
skip byte
- Instruction encoding :
+ Instruction encoding::
0 0 0 0 X X X X (0..15)
Depends on the number of literals copied by the last instruction.
@@ -156,6 +162,7 @@ Byte sequences
distance = (H << 3) + D + 1
Authors
+=======
This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
analysis of the decompression code available in Linux 3.16-rc5. The code is
diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt
index 7ed371c85204..0ed95009cc30 100644
--- a/Documentation/mailbox.txt
+++ b/Documentation/mailbox.txt
@@ -1,7 +1,10 @@
- The Common Mailbox Framework
- Jassi Brar <jaswinder.singh@linaro.org>
+============================
+The Common Mailbox Framework
+============================
- This document aims to help developers write client and controller
+:Author: Jassi Brar <jaswinder.singh@linaro.org>
+
+This document aims to help developers write client and controller
drivers for the API. But before we start, let us note that the
client (especially) and controller drivers are likely going to be
very platform specific because the remote firmware is likely to be
@@ -13,14 +16,17 @@ similar copies of code written for each platform. Having said that,
nothing prevents the remote f/w from also being Linux based and using
the same api there. However, none of that helps us locally because we
only ever deal at the client's protocol level.
- Some of the choices made during implementation are the result of this
+
+Some of the choices made during implementation are the result of this
peculiarity of this "common" framework.
- Part 1 - Controller Driver (See include/linux/mailbox_controller.h)
+Controller Driver (See include/linux/mailbox_controller.h)
+==========================================================
+
- Allocate mbox_controller and the array of mbox_chan.
+Allocate mbox_controller and the array of mbox_chan.
Populate mbox_chan_ops; all callbacks except peek_data() are mandatory.
The controller driver might know a message has been consumed
by the remote by getting an IRQ or polling some hardware flag
@@ -30,91 +36,94 @@ the controller driver should set via 'txdone_irq' or 'txdone_poll'
or neither.
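+
+A bare-bones registration sketch (field names follow
+include/linux/mailbox_controller.h; mbox, my_chans, NUM_CHANS and the
+my_*() callbacks stand in for the driver's own definitions)::
+
+  static const struct mbox_chan_ops my_ops = {
+          .send_data = my_send_data,
+          .startup   = my_startup,
+          .shutdown  = my_shutdown,
+  };
+
+  mbox.dev = &pdev->dev;
+  mbox.ops = &my_ops;
+  mbox.chans = my_chans;          /* array of struct mbox_chan */
+  mbox.num_chans = NUM_CHANS;
+  mbox.txdone_irq = true;         /* TX-done is signalled by IRQ */
+
+  ret = mbox_controller_register(&mbox);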
- Part 2 - Client Driver (See include/linux/mailbox_client.h)
+Client Driver (See include/linux/mailbox_client.h)
+==================================================
- The client might want to operate in blocking mode (synchronously
+
+The client might want to operate in blocking mode (synchronously
send a message through before returning) or non-blocking/async mode (submit
a message and a callback function to the API and return immediately).
-
-struct demo_client {
- struct mbox_client cl;
- struct mbox_chan *mbox;
- struct completion c;
- bool async;
- /* ... */
-};
-
-/*
- * This is the handler for data received from remote. The behaviour is purely
- * dependent upon the protocol. This is just an example.
- */
-static void message_from_remote(struct mbox_client *cl, void *mssg)
-{
- struct demo_client *dc = container_of(cl, struct demo_client, cl);
- if (dc->async) {
- if (is_an_ack(mssg)) {
- /* An ACK to our last sample sent */
- return; /* Or do something else here */
- } else { /* A new message from remote */
- queue_req(mssg);
+::
+
+ struct demo_client {
+ struct mbox_client cl;
+ struct mbox_chan *mbox;
+ struct completion c;
+ bool async;
+ /* ... */
+ };
+
+ /*
+ * This is the handler for data received from remote. The behaviour is purely
+ * dependent upon the protocol. This is just an example.
+ */
+ static void message_from_remote(struct mbox_client *cl, void *mssg)
+ {
+ struct demo_client *dc = container_of(cl, struct demo_client, cl);
+ if (dc->async) {
+ if (is_an_ack(mssg)) {
+ /* An ACK to our last sample sent */
+ return; /* Or do something else here */
+ } else { /* A new message from remote */
+ queue_req(mssg);
+ }
+ } else {
+ /* Remote f/w sends only ACK packets on this channel */
+ return;
}
- } else {
- /* Remote f/w sends only ACK packets on this channel */
- return;
}
-}
-
-static void sample_sent(struct mbox_client *cl, void *mssg, int r)
-{
- struct demo_client *dc = container_of(cl, struct demo_client, cl);
- complete(&dc->c);
-}
-
-static void client_demo(struct platform_device *pdev)
-{
- struct demo_client *dc_sync, *dc_async;
- /* The controller already knows async_pkt and sync_pkt */
- struct async_pkt ap;
- struct sync_pkt sp;
-
- dc_sync = kzalloc(sizeof(*dc_sync), GFP_KERNEL);
- dc_async = kzalloc(sizeof(*dc_async), GFP_KERNEL);
-
- /* Populate non-blocking mode client */
- dc_async->cl.dev = &pdev->dev;
- dc_async->cl.rx_callback = message_from_remote;
- dc_async->cl.tx_done = sample_sent;
- dc_async->cl.tx_block = false;
- dc_async->cl.tx_tout = 0; /* doesn't matter here */
- dc_async->cl.knows_txdone = false; /* depending upon protocol */
- dc_async->async = true;
- init_completion(&dc_async->c);
-
- /* Populate blocking mode client */
- dc_sync->cl.dev = &pdev->dev;
- dc_sync->cl.rx_callback = message_from_remote;
- dc_sync->cl.tx_done = NULL; /* operate in blocking mode */
- dc_sync->cl.tx_block = true;
- dc_sync->cl.tx_tout = 500; /* by half a second */
- dc_sync->cl.knows_txdone = false; /* depending upon protocol */
- dc_sync->async = false;
-
- /* ASync mailbox is listed second in 'mboxes' property */
- dc_async->mbox = mbox_request_channel(&dc_async->cl, 1);
- /* Populate data packet */
- /* ap.xxx = 123; etc */
- /* Send async message to remote */
- mbox_send_message(dc_async->mbox, &ap);
-
- /* Sync mailbox is listed first in 'mboxes' property */
- dc_sync->mbox = mbox_request_channel(&dc_sync->cl, 0);
- /* Populate data packet */
- /* sp.abc = 123; etc */
- /* Send message to remote in blocking mode */
- mbox_send_message(dc_sync->mbox, &sp);
- /* At this point 'sp' has been sent */
-
- /* Now wait for async chan to be done */
- wait_for_completion(&dc_async->c);
-}
+
+ static void sample_sent(struct mbox_client *cl, void *mssg, int r)
+ {
+ struct demo_client *dc = container_of(cl, struct demo_client, cl);
+ complete(&dc->c);
+ }
+
+ static void client_demo(struct platform_device *pdev)
+ {
+ struct demo_client *dc_sync, *dc_async;
+ /* The controller already knows async_pkt and sync_pkt */
+ struct async_pkt ap;
+ struct sync_pkt sp;
+
+ dc_sync = kzalloc(sizeof(*dc_sync), GFP_KERNEL);
+ dc_async = kzalloc(sizeof(*dc_async), GFP_KERNEL);
+
+ /* Populate non-blocking mode client */
+ dc_async->cl.dev = &pdev->dev;
+ dc_async->cl.rx_callback = message_from_remote;
+ dc_async->cl.tx_done = sample_sent;
+ dc_async->cl.tx_block = false;
+ dc_async->cl.tx_tout = 0; /* doesn't matter here */
+ dc_async->cl.knows_txdone = false; /* depending upon protocol */
+ dc_async->async = true;
+ init_completion(&dc_async->c);
+
+ /* Populate blocking mode client */
+ dc_sync->cl.dev = &pdev->dev;
+ dc_sync->cl.rx_callback = message_from_remote;
+ dc_sync->cl.tx_done = NULL; /* operate in blocking mode */
+ dc_sync->cl.tx_block = true;
+ dc_sync->cl.tx_tout = 500; /* by half a second */
+ dc_sync->cl.knows_txdone = false; /* depending upon protocol */
+ dc_sync->async = false;
+
+ /* ASync mailbox is listed second in 'mboxes' property */
+ dc_async->mbox = mbox_request_channel(&dc_async->cl, 1);
+ /* Populate data packet */
+ /* ap.xxx = 123; etc */
+ /* Send async message to remote */
+ mbox_send_message(dc_async->mbox, &ap);
+
+ /* Sync mailbox is listed first in 'mboxes' property */
+ dc_sync->mbox = mbox_request_channel(&dc_sync->cl, 0);
+ /* Populate data packet */
+ /* sp.abc = 123; etc */
+ /* Send message to remote in blocking mode */
+ mbox_send_message(dc_sync->mbox, &sp);
+ /* At this point 'sp' has been sent */
+
+ /* Now wait for async chan to be done */
+ wait_for_completion(&dc_async->c);
+ }
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c239a0cf4b1a..c4ddfcd5ee32 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1876,8 +1876,8 @@ There are some more advanced barrier functions:
This makes sure that the death mark on the object is perceived to be set
*before* the reference counter is decremented.
- See Documentation/atomic_ops.txt for more information. See the "Atomic
- operations" subsection for information on where to use these.
+ See Documentation/core-api/atomic_ops.rst for more information. See the
+ "Atomic operations" subsection for information on where to use these.
(*) lockless_dereference();
@@ -2584,7 +2584,7 @@ situations because on some CPUs the atomic instructions used imply full memory
barriers, and so barrier instructions are superfluous in conjunction with them,
and in such cases the special barrier primitives will be no-ops.
-See Documentation/atomic_ops.txt for more information.
+See Documentation/core-api/atomic_ops.rst for more information.
ACCESSING DEVICES
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index 5c628e19d6cd..7f49ebf3ddb2 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -2,43 +2,48 @@
Memory Hotplug
==============
-Created: Jul 28 2007
-Add description of notifier of memory hotplug Oct 11 2007
+:Created: Jul 28 2007
+:Updated: Add description of notifier of memory hotplug: Oct 11 2007
This document covers memory hotplug, including how to use it and its current
status. Because memory hotplug is still under development, the contents of
this text will change often.
-1. Introduction
- 1.1 purpose of memory hotplug
- 1.2. Phases of memory hotplug
- 1.3. Unit of Memory online/offline operation
-2. Kernel Configuration
-3. sysfs files for memory hotplug
-4. Physical memory hot-add phase
- 4.1 Hardware(Firmware) Support
- 4.2 Notify memory hot-add event by hand
-5. Logical Memory hot-add phase
- 5.1. State of memory
- 5.2. How to online memory
-6. Logical memory remove
- 6.1 Memory offline and ZONE_MOVABLE
- 6.2. How to offline memory
-7. Physical memory remove
-8. Memory hotplug event notifier
-9. Future Work List
-
-Note(1): x86_64's has special implementation for memory hotplug.
- This text does not describe it.
-Note(2): This text assumes that sysfs is mounted at /sys.
+.. CONTENTS
+ 1. Introduction
+ 1.1 purpose of memory hotplug
+ 1.2. Phases of memory hotplug
+ 1.3. Unit of Memory online/offline operation
+ 2. Kernel Configuration
+ 3. sysfs files for memory hotplug
+ 4. Physical memory hot-add phase
+ 4.1 Hardware(Firmware) Support
+ 4.2 Notify memory hot-add event by hand
+ 5. Logical Memory hot-add phase
+ 5.1. State of memory
+ 5.2. How to online memory
+ 6. Logical memory remove
+ 6.1 Memory offline and ZONE_MOVABLE
+ 6.2. How to offline memory
+ 7. Physical memory remove
+ 8. Memory hotplug event notifier
+ 9. Future Work List
----------------
-1. Introduction
----------------
-1.1 purpose of memory hotplug
-------------
+.. note::
+
+ (1) x86_64 has a special implementation for memory hotplug.
+ This text does not describe it.
+ (2) This text assumes that sysfs is mounted at /sys.
+
+
+Introduction
+============
+
+purpose of memory hotplug
+-------------------------
+
Memory Hotplug allows users to increase/decrease the amount of memory.
Generally, there are two purposes.
@@ -53,9 +58,11 @@ hardware which supports memory power management.
Linux memory hotplug is designed for both purposes.
-1.2. Phases of memory hotplug
----------------
-There are 2 phases in Memory Hotplug.
+Phases of memory hotplug
+------------------------
+
+There are 2 phases in Memory Hotplug:
+
1) Physical Memory Hotplug phase
2) Logical Memory Hotplug phase.
@@ -70,7 +77,7 @@ management tables, and makes sysfs files for new memory's operation.
If firmware supports notification of connection of new memory to OS,
this phase is triggered automatically. ACPI can notify this event. If not,
"probe" operation by system administration is used instead.
-(see Section 4.).
+(see :ref:`memory_hotplug_physical_mem`).
Logical Memory Hotplug phase is to change memory state into
available/unavailable for users. Amount of memory from user's view is
@@ -83,11 +90,12 @@ Logical Memory Hotplug phase is triggered by write of sysfs file by system
administrator. For the hot-add case, it must be executed after Physical Hotplug
phase by hand.
(However, if you write udev's hotplug scripts for memory hotplug, these
- phases can be execute in seamless way.)
+phases can be executed in a seamless way.)
+
+Unit of Memory online/offline operation
+---------------------------------------
-1.3. Unit of Memory online/offline operation
-------------
Memory hotplug uses SPARSEMEM memory model which allows memory to be divided
into chunks of the same size. These chunks are called "sections". The size of
a memory section is architecture dependent. For example, power uses 16MiB, ia64
@@ -97,46 +105,50 @@ Memory sections are combined into chunks referred to as "memory blocks". The
size of a memory block is architecture dependent and represents the logical
unit upon which memory online/offline operations are to be performed. The
default size of a memory block is the same as memory section size unless an
-architecture specifies otherwise. (see Section 3.)
+architecture specifies otherwise. (see :ref:`memory_hotplug_sysfs_files`.)
To determine the size (in bytes) of a memory block please read this file:
/sys/devices/system/memory/block_size_bytes
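+
+For example (a sketch; the value is printed in hexadecimal, here a 128MiB
+block size on a typical x86_64 configuration)::
+
+  % cat /sys/devices/system/memory/block_size_bytes
+  8000000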
------------------------
-2. Kernel Configuration
------------------------
+Kernel Configuration
+====================
+
To use the memory hotplug feature, the kernel must be compiled with the
following config options.
-- For all memory hotplug
- Memory model -> Sparse Memory (CONFIG_SPARSEMEM)
- Allow for memory hot-add (CONFIG_MEMORY_HOTPLUG)
+- For all memory hotplug:
+ - Memory model -> Sparse Memory (CONFIG_SPARSEMEM)
+ - Allow for memory hot-add (CONFIG_MEMORY_HOTPLUG)
-- To enable memory removal, the following are also necessary
- Allow for memory hot remove (CONFIG_MEMORY_HOTREMOVE)
- Page Migration (CONFIG_MIGRATION)
+- To enable memory removal, the following are also necessary:
+ - Allow for memory hot remove (CONFIG_MEMORY_HOTREMOVE)
+ - Page Migration (CONFIG_MIGRATION)
-- For ACPI memory hotplug, the following are also necessary
- Memory hotplug (under ACPI Support menu) (CONFIG_ACPI_HOTPLUG_MEMORY)
- This option can be kernel module.
+- For ACPI memory hotplug, the following are also necessary:
+ - Memory hotplug (under ACPI Support menu) (CONFIG_ACPI_HOTPLUG_MEMORY)
+ - This option can be a kernel module.
- As a related configuration, if your box has a feature of NUMA-node hotplug
via ACPI, then this option is necessary too.
- ACPI0004,PNP0A05 and PNP0A06 Container Driver (under ACPI Support menu)
- (CONFIG_ACPI_CONTAINER).
- This option can be kernel module too.
+ - ACPI0004,PNP0A05 and PNP0A06 Container Driver (under ACPI Support menu)
+ (CONFIG_ACPI_CONTAINER).
+
+ This option can be a kernel module too.
+
+
+.. _memory_hotplug_sysfs_files:
+
+sysfs files for memory hotplug
+==============================
---------------------------------
-3 sysfs files for memory hotplug
---------------------------------
All memory blocks have their device information in sysfs. Each memory block
-is described under /sys/devices/system/memory as
+is described under /sys/devices/system/memory as:
-/sys/devices/system/memory/memoryXXX
-(XXX is the memory block id.)
+ /sys/devices/system/memory/memoryXXX
+ (XXX is the memory block id.)
For the memory block covered by the sysfs directory, it is expected that all
memory sections in this range are present and no memory holes exist in the
@@ -145,43 +157,53 @@ the existence of one should not affect the hotplug capabilities of the memory
block.
For example, assume a 1GiB memory block size. A device for memory starting at
-0x100000000 is /sys/device/system/memory/memory4
-(0x100000000 / 1Gib = 4)
+0x100000000 is /sys/devices/system/memory/memory4::
+
+ (0x100000000 / 1GiB = 4)
+
This device covers address range [0x100000000 ... 0x140000000)
Under each memory block, you can see 5 files:
-/sys/devices/system/memory/memoryXXX/phys_index
-/sys/devices/system/memory/memoryXXX/phys_device
-/sys/devices/system/memory/memoryXXX/state
-/sys/devices/system/memory/memoryXXX/removable
-/sys/devices/system/memory/memoryXXX/valid_zones
+- /sys/devices/system/memory/memoryXXX/phys_index
+- /sys/devices/system/memory/memoryXXX/phys_device
+- /sys/devices/system/memory/memoryXXX/state
+- /sys/devices/system/memory/memoryXXX/removable
+- /sys/devices/system/memory/memoryXXX/valid_zones
+
+=================== ============================================================
+``phys_index`` read-only and contains memory block id, same as XXX.
+``state`` read-write
+
+ - at read: contains online/offline state of memory.
+ - at write: user can specify "online_kernel",
-'phys_index' : read-only and contains memory block id, same as XXX.
-'state' : read-write
- at read: contains online/offline state of memory.
- at write: user can specify "online_kernel",
"online_movable", "online", "offline" command
which will be performed on all sections in the block.
-'phys_device' : read-only: designed to show the name of physical memory
+``phys_device`` read-only: designed to show the name of physical memory
device. This is not well implemented now.
-'removable' : read-only: contains an integer value indicating
+``removable`` read-only: contains an integer value indicating
whether the memory block is removable or not
removable. A value of 1 indicates that the memory
block is removable and a value of 0 indicates that
it is not removable. A memory block is removable only if
every section in the block is removable.
-'valid_zones' : read-only: designed to show which zones this memory block
+``valid_zones`` read-only: designed to show which zones this memory block
can be onlined to.
- The first column shows it's default zone.
+
+ The first column shows its default zone.
+
"memory6/valid_zones: Normal Movable" shows this memoryblock
can be onlined to ZONE_NORMAL by default and to ZONE_MOVABLE
by online_movable.
+
"memory7/valid_zones: Movable Normal" shows this memoryblock
can be onlined to ZONE_MOVABLE by default and to ZONE_NORMAL
by online_kernel.
+=================== ============================================================
+
+.. note::
-NOTE:
These directories/files appear after the physical memory hotplug phase.
If CONFIG_NUMA is enabled the memoryXXX/ directories can also be accessed
@@ -193,13 +215,14 @@ For example:
A backlink will also be created:
/sys/devices/system/memory/memory9/node0 -> ../../node/node0
+.. _memory_hotplug_physical_mem:
+
+Physical memory hot-add phase
+=============================
---------------------------------
-4. Physical memory hot-add phase
---------------------------------
+Hardware(Firmware) Support
+--------------------------
-4.1 Hardware(Firmware) Support
-------------
On x86_64/ia64 platform, memory hotplug by ACPI is supported.
In general, the firmware (ACPI) which supports memory hotplug defines
@@ -209,7 +232,8 @@ script. This will be done automatically.
But scripts for memory hotplug are not contained in generic udev package(now).
You may have to write it by yourself or online/offline memory by hand.
-Please see "How to online memory", "How to offline memory" in this text.
+Please see :ref:`memory_hotplug_how_to_online_memory` and
+:ref:`memory_hotplug_how_to_offline_memory`.
If firmware supports NUMA-node hotplug, and defines an object _HID "ACPI0004",
"PNP0A05", or "PNP0A06", notification is asserted to it, and ACPI handler
@@ -217,8 +241,9 @@ calls hotplug code for all of objects which are defined in it.
If memory device is found, memory hotplug code will be called.
-4.2 Notify memory hot-add event by hand
-------------
+Notify memory hot-add event by hand
+-----------------------------------
+
On some architectures, the firmware may not notify the kernel of a memory
hotplug event. Therefore, the memory "probe" interface is supported to
explicitly notify the kernel. This interface depends on
@@ -229,45 +254,48 @@ notification.
Probe interface is located at
/sys/devices/system/memory/probe
-You can tell the physical address of new memory to the kernel by
+You can tell the physical address of new memory to the kernel by::
-% echo start_address_of_new_memory > /sys/devices/system/memory/probe
+ % echo start_address_of_new_memory > /sys/devices/system/memory/probe
Then, [start_address_of_new_memory, start_address_of_new_memory +
memory_block_size] memory range is hot-added. In this case, hotplug script is
not called (in current implementation). You'll have to online memory by
-yourself. Please see "How to online memory" in this text.
+yourself. Please see :ref:`memory_hotplug_how_to_online_memory`.
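+
+For example, assuming the new memory starts at physical address
+0x100000000 (a hypothetical, platform-dependent value)::
+
+ % echo 0x100000000 > /sys/devices/system/memory/probe
+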
-------------------------------
-5. Logical Memory hot-add phase
-------------------------------
+Logical Memory hot-add phase
+============================
-5.1. State of memory
-------------
-To see (online/offline) state of a memory block, read 'state' file.
+State of memory
+---------------
+
+To see (online/offline) state of a memory block, read 'state' file::
+
+ % cat /sys/devices/system/memory/memoryXXX/state
-% cat /sys/device/system/memory/memoryXXX/state
+- If the memory block is online, you'll read "online".
+- If the memory block is offline, you'll read "offline".
-If the memory block is online, you'll read "online".
-If the memory block is offline, you'll read "offline".
+.. _memory_hotplug_how_to_online_memory:
+
+How to online memory
+--------------------
-5.2. How to online memory
-------------
When the memory is hot-added, the kernel decides whether or not to "online"
-it according to the policy which can be read from "auto_online_blocks" file:
+it according to the policy which can be read from "auto_online_blocks" file::
-% cat /sys/devices/system/memory/auto_online_blocks
+ % cat /sys/devices/system/memory/auto_online_blocks
The default depends on the CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config
option. If it is disabled the default is "offline" which means the newly added
memory is not in a ready-to-use state and you have to "online" the newly added
memory blocks manually. Automatic onlining can be requested by writing "online"
-to "auto_online_blocks" file:
+to "auto_online_blocks" file::
-% echo online > /sys/devices/system/memory/auto_online_blocks
+ % echo online > /sys/devices/system/memory/auto_online_blocks
This sets a global policy and impacts all memory blocks that will subsequently
be hotplugged. Currently offline blocks keep their state. It is possible, under
@@ -277,24 +305,26 @@ online. User space tools can check their "state" files
If the automatic onlining wasn't requested, failed, or some memory block was
offlined, it is possible to change the individual block's state by writing to the
-"state" file:
+"state" file::
-% echo online > /sys/devices/system/memory/memoryXXX/state
+ % echo online > /sys/devices/system/memory/memoryXXX/state
This onlining will not change the ZONE type of the target memory block.
If the memory block doesn't belong to any zone, an appropriate kernel zone
(usually ZONE_NORMAL) will be used, unless the movable_node kernel command
line option is specified, in which case ZONE_MOVABLE will be used.
-You can explicitly request to associate it with ZONE_MOVABLE by
+You can explicitly request to associate it with ZONE_MOVABLE by::
+
+ % echo online_movable > /sys/devices/system/memory/memoryXXX/state
-% echo online_movable > /sys/devices/system/memory/memoryXXX/state
-(NOTE: current limit: this memory block must be adjacent to ZONE_MOVABLE)
+.. note:: current limit: this memory block must be adjacent to ZONE_MOVABLE
-Or you can explicitly request a kernel zone (usually ZONE_NORMAL) by:
+Or you can explicitly request a kernel zone (usually ZONE_NORMAL) by::
-% echo online_kernel > /sys/devices/system/memory/memoryXXX/state
-(NOTE: current limit: this memory block must be adjacent to ZONE_NORMAL)
+ % echo online_kernel > /sys/devices/system/memory/memoryXXX/state
+
+.. note:: current limit: this memory block must be adjacent to ZONE_NORMAL
An explicit zone onlining can fail (e.g. when the range is already within
an existing and incompatible zone).
@@ -306,12 +336,12 @@ This may be changed in future.
-------------------------
-6. Logical memory remove
-------------------------
+Logical memory remove
+=====================
+
+Memory offline and ZONE_MOVABLE
+-------------------------------
-6.1 Memory offline and ZONE_MOVABLE
-------------
Memory offlining is more complicated than memory onlining. Because memory
offlining has to make the whole memory block unused, it can fail if
the memory block includes memory which cannot be freed.
@@ -336,24 +366,27 @@ Assume the system has "TOTAL" amount of memory at boot time, this boot option
creates ZONE_MOVABLE as following.
1) When kernelcore=YYYY boot option is used,
- Size of memory not for movable pages (not for offline) is YYYY.
- Size of memory for movable pages (for offline) is TOTAL-YYYY.
+ Size of memory not for movable pages (not for offline) is YYYY.
+ Size of memory for movable pages (for offline) is TOTAL-YYYY.
2) When movablecore=ZZZZ boot option is used,
- Size of memory not for movable pages (not for offline) is TOTAL - ZZZZ.
- Size of memory for movable pages (for offline) is ZZZZ.
+ Size of memory not for movable pages (not for offline) is TOTAL - ZZZZ.
+ Size of memory for movable pages (for offline) is ZZZZ.
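+
+For example, on a machine with TOTAL=8G of memory, booting with
+movablecore=2G leaves 6G of memory not for movable pages (not offlinable)
+and 2G of memory for movable pages (offlinable).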
+
+.. note::
+ Unfortunately, there is no information to show which memory block belongs
+ to ZONE_MOVABLE. This is TBD.
-Note: Unfortunately, there is no information to show which memory block belongs
-to ZONE_MOVABLE. This is TBD.
+.. _memory_hotplug_how_to_offline_memory:
+How to offline memory
+---------------------
-6.2. How to offline memory
-------------
You can offline a memory block by using the same sysfs interface that was used
-in memory onlining.
+in memory onlining::
-% echo offline > /sys/devices/system/memory/memoryXXX/state
+ % echo offline > /sys/devices/system/memory/memoryXXX/state
If offline succeeds, the state of the memory block is changed to be "offline".
If it fails, an error code (like -EBUSY) will be returned by the kernel.
@@ -367,22 +400,22 @@ able to offline it (or not). (For example, a page is referred to by some kernel
internal call and released soon.)
Consideration:
-Memory hotplug's design direction is to make the possibility of memory offlining
-higher and to guarantee unplugging memory under any situation. But it needs
-more work. Returning -EBUSY under some situation may be good because the user
-can decide to retry more or not by himself. Currently, memory offlining code
-does some amount of retry with 120 seconds timeout.
+ Memory hotplug's design direction is to make the possibility of memory
+ offlining higher and to guarantee unplugging memory under any situation. But
+ it needs more work. Returning -EBUSY under some situations may be good because
+ the user can then decide whether to retry or not. Currently, the memory
+ offlining code does some amount of retrying, with a 120 second timeout.
+
+Physical memory remove
+======================
--------------------------
-7. Physical memory remove
--------------------------
Need more implementation yet....
 - Notifying the firmware when the OS has completed remove work.
 - Guarding against removal if not yet done.
---------------------------------
-8. Memory hotplug event notifier
---------------------------------
+Memory hotplug event notifier
+=============================
+
Hotplugging events are sent to a notification queue.
There are six types of notification defined in include/linux/memory.h:
@@ -412,14 +445,14 @@ MEM_CANCEL_OFFLINE
MEM_OFFLINE
Generated after offlining memory is complete.
-A callback routine can be registered by calling
+A callback routine can be registered by calling::
hotplug_memory_notifier(callback_func, priority)
Callback functions with higher values of priority are called before callback
functions with lower values.
-A callback function must have the following prototype:
+A callback function must have the following prototype::
int callback_func(
struct notifier_block *self, unsigned long action, void *arg);
@@ -427,27 +460,28 @@ A callback function must have the following prototype:
The first argument of the callback function (self) is a pointer to the block
of the notifier chain that points to the callback function itself.
The second argument (action) is one of the event types described above.
-The third argument (arg) passes a pointer of struct memory_notify.
-
-struct memory_notify {
- unsigned long start_pfn;
- unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid_high;
- int status_change_nid;
-}
-
-start_pfn is start_pfn of online/offline memory.
-nr_pages is # of pages of online/offline memory.
-status_change_nid_normal is set node id when N_NORMAL_MEMORY of nodemask
-is (will be) set/clear, if this is -1, then nodemask status is not changed.
-status_change_nid_high is set node id when N_HIGH_MEMORY of nodemask
-is (will be) set/clear, if this is -1, then nodemask status is not changed.
-status_change_nid is set node id when N_MEMORY of nodemask is (will be)
-set/clear. It means a new(memoryless) node gets new memory by online and a
-node loses all memory. If this is -1, then nodemask status is not changed.
-If status_changed_nid* >= 0, callback should create/discard structures for the
-node if necessary.
+The third argument (arg) passes a pointer of struct memory_notify::
+
+ struct memory_notify {
+ unsigned long start_pfn;
+ unsigned long nr_pages;
+ int status_change_nid_normal;
+ int status_change_nid_high;
+ int status_change_nid;
+ }
+
+- start_pfn is the start_pfn of the online/offline memory.
+- nr_pages is the number of pages of the online/offline memory.
+- status_change_nid_normal is set to the node id when N_NORMAL_MEMORY of the
+  nodemask is (or will be) set/cleared; if this is -1, then the nodemask
+  status is not changed.
+- status_change_nid_high is set to the node id when N_HIGH_MEMORY of the
+  nodemask is (or will be) set/cleared; if this is -1, then the nodemask
+  status is not changed.
+- status_change_nid is set to the node id when N_MEMORY of the nodemask is
+  (or will be) set/cleared. It means a new (memoryless) node gets new memory
+  by onlining, or a node loses all its memory. If this is -1, then the
+  nodemask status is not changed.
+
+  If status_change_nid* >= 0, the callback should create/discard structures
+  for the node if necessary.
The callback routine shall return one of the values
NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD, NOTIFY_STOP
@@ -461,9 +495,9 @@ further processing of the notification queue.
NOTIFY_STOP stops further processing of the notification queue.
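+
+As a rough illustration (the foo_* names and the callback bodies are
+made-up sketches, not part of the kernel API), a client could hook the
+notifier like this::
+
+	#include <linux/memory.h>
+	#include <linux/notifier.h>
+
+	static int foo_mem_callback(struct notifier_block *self,
+				    unsigned long action, void *arg)
+	{
+		struct memory_notify *mn = arg;
+
+		pr_debug("action %lu: %lu pages at pfn %#lx\n",
+			 action, mn->nr_pages, mn->start_pfn);
+
+		switch (action) {
+		case MEM_GOING_ONLINE:
+			/* Create per-node structures if mn->status_change_nid >= 0. */
+			break;
+		case MEM_OFFLINE:
+			/* Discard data covering the offlined range. */
+			break;
+		}
+		return NOTIFY_OK;
+	}
+
+	static int __init foo_init(void)
+	{
+		return hotplug_memory_notifier(foo_mem_callback, 0);
+	}
+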
---------------
-9. Future Work
---------------
+Future Work
+===========
+
- allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like
sysctl or new control file.
- showing memory block and physical device relationship.
@@ -471,4 +505,3 @@ NOTIFY_STOP stops further processing of the notification queue.
- support HugeTLB page migration and offlining.
- memmap removing at memory offline.
- physical remove memory.
-
diff --git a/Documentation/men-chameleon-bus.txt b/Documentation/men-chameleon-bus.txt
index 30ded732027e..1b1f048aa748 100644
--- a/Documentation/men-chameleon-bus.txt
+++ b/Documentation/men-chameleon-bus.txt
@@ -1,163 +1,175 @@
- MEN Chameleon Bus
- =================
-
-Table of Contents
=================
-1 Introduction
- 1.1 Scope of this Document
- 1.2 Limitations of the current implementation
-2 Architecture
- 2.1 MEN Chameleon Bus
- 2.2 Carrier Devices
- 2.3 Parser
-3 Resource handling
- 3.1 Memory Resources
- 3.2 IRQs
-4 Writing an MCB driver
- 4.1 The driver structure
- 4.2 Probing and attaching
- 4.3 Initializing the driver
-
-
-1 Introduction
-===============
- This document describes the architecture and implementation of the MEN
- Chameleon Bus (called MCB throughout this document).
-
-1.1 Scope of this Document
----------------------------
- This document is intended to be a short overview of the current
- implementation and does by no means describe the complete possibilities of MCB
- based devices.
-
-1.2 Limitations of the current implementation
-----------------------------------------------
- The current implementation is limited to PCI and PCIe based carrier devices
- that only use a single memory resource and share the PCI legacy IRQ. Not
- implemented are:
- - Multi-resource MCB devices like the VME Controller or M-Module carrier.
- - MCB devices that need another MCB device, like SRAM for a DMA Controller's
- buffer descriptors or a video controller's video memory.
- - A per-carrier IRQ domain for carrier devices that have one (or more) IRQs
- per MCB device like PCIe based carriers with MSI or MSI-X support.
-
-2 Architecture
-===============
- MCB is divided into 3 functional blocks:
- - The MEN Chameleon Bus itself,
- - drivers for MCB Carrier Devices and
- - the parser for the Chameleon table.
-
-2.1 MEN Chameleon Bus
+MEN Chameleon Bus
+=================
+
+.. Table of Contents
+ =================
+ 1 Introduction
+ 1.1 Scope of this Document
+ 1.2 Limitations of the current implementation
+ 2 Architecture
+ 2.1 MEN Chameleon Bus
+ 2.2 Carrier Devices
+ 2.3 Parser
+ 3 Resource handling
+ 3.1 Memory Resources
+ 3.2 IRQs
+ 4 Writing an MCB driver
+ 4.1 The driver structure
+ 4.2 Probing and attaching
+ 4.3 Initializing the driver
+
+
+Introduction
+============
+
+This document describes the architecture and implementation of the MEN
+Chameleon Bus (called MCB throughout this document).
+
+Scope of this Document
----------------------
- The MEN Chameleon Bus is an artificial bus system that attaches to a so
- called Chameleon FPGA device found on some hardware produced my MEN Mikro
- Elektronik GmbH. These devices are multi-function devices implemented in a
- single FPGA and usually attached via some sort of PCI or PCIe link. Each
- FPGA contains a header section describing the content of the FPGA. The
- header lists the device id, PCI BAR, offset from the beginning of the PCI
- BAR, size in the FPGA, interrupt number and some other properties currently
- not handled by the MCB implementation.
-
-2.2 Carrier Devices
+
+This document is intended to be a short overview of the current
+implementation and by no means describes the complete possibilities of MCB
+based devices.
+
+Limitations of the current implementation
+-----------------------------------------
+
+The current implementation is limited to PCI and PCIe based carrier devices
+that only use a single memory resource and share the PCI legacy IRQ. Not
+implemented are:
+
+- Multi-resource MCB devices like the VME Controller or M-Module carrier.
+- MCB devices that need another MCB device, like SRAM for a DMA Controller's
+ buffer descriptors or a video controller's video memory.
+- A per-carrier IRQ domain for carrier devices that have one (or more) IRQs
+ per MCB device like PCIe based carriers with MSI or MSI-X support.
+
+Architecture
+============
+
+MCB is divided into 3 functional blocks:
+
+- The MEN Chameleon Bus itself,
+- drivers for MCB Carrier Devices and
+- the parser for the Chameleon table.
+
+MEN Chameleon Bus
+-----------------
+
+The MEN Chameleon Bus is an artificial bus system that attaches to a
+so-called Chameleon FPGA device found on some hardware produced by MEN Mikro
+Elektronik GmbH. These devices are multi-function devices implemented in a
+single FPGA and usually attached via some sort of PCI or PCIe link. Each
+FPGA contains a header section describing the content of the FPGA. The
+header lists the device id, PCI BAR, offset from the beginning of the PCI
+BAR, size in the FPGA, interrupt number and some other properties currently
+not handled by the MCB implementation.
+
+Carrier Devices
+---------------
+
+A carrier device is just an abstraction for the real world physical bus the
+Chameleon FPGA is attached to. Some IP Core drivers may need to interact with
+properties of the carrier device (like querying the IRQ number of a PCI
+device). To provide abstraction from the real hardware bus, an MCB carrier
+device provides callback methods to translate the driver's MCB function calls
+to hardware related function calls. For example a carrier device may
+implement the get_irq() method which can be translated into a hardware bus
+query for the IRQ number the device should use.
+
+Parser
+------
+
+The parser reads the first 512 bytes of a Chameleon device and parses the
+Chameleon table. Currently the parser only supports the Chameleon v2 variant
+of the Chameleon table but can easily be adapted to support an older or
+possible future variant. While parsing the table's entries, new MCB devices
+are allocated and their resources are assigned according to the resource
+assignment in the Chameleon table. After resource assignment is finished, the
+MCB devices are registered at the MCB and thus at the driver core of the
+Linux kernel.
+
+Resource handling
+=================
+
+The current implementation assigns exactly one memory and one IRQ resource
+per MCB device. But this is likely going to change in the future.
+
+Memory Resources
+----------------
+
+Each MCB device has exactly one memory resource, which can be requested from
+the MCB bus. This memory resource is the physical address of the MCB device
+inside the carrier and is intended to be passed to ioremap() and friends. It
+is already requested from the kernel by calling request_mem_region().
+
+IRQs
+----
+
+Each MCB device has exactly one IRQ resource, which can be requested from the
+MCB bus. If a carrier device driver implements the ->get_irq() callback
+method, the IRQ number assigned by the carrier device will be returned,
+otherwise the IRQ number inside the Chameleon table will be returned. This
+number is suitable to be passed to request_irq().
+
+Writing an MCB driver
+=====================
+
+The driver structure
--------------------
- A carrier device is just an abstraction for the real world physical bus the
- Chameleon FPGA is attached to. Some IP Core drivers may need to interact with
- properties of the carrier device (like querying the IRQ number of a PCI
- device). To provide abstraction from the real hardware bus, an MCB carrier
- device provides callback methods to translate the driver's MCB function calls
- to hardware related function calls. For example a carrier device may
- implement the get_irq() method which can be translated into a hardware bus
- query for the IRQ number the device should use.
-
-2.3 Parser
------------
- The parser reads the first 512 bytes of a Chameleon device and parses the
- Chameleon table. Currently the parser only supports the Chameleon v2 variant
- of the Chameleon table but can easily be adopted to support an older or
- possible future variant. While parsing the table's entries new MCB devices
- are allocated and their resources are assigned according to the resource
- assignment in the Chameleon table. After resource assignment is finished, the
- MCB devices are registered at the MCB and thus at the driver core of the
- Linux kernel.
-
-3 Resource handling
-====================
- The current implementation assigns exactly one memory and one IRQ resource
- per MCB device. But this is likely going to change in the future.
-
-3.1 Memory Resources
+
+Each MCB driver has a structure to identify the device driver as well as
+device ids which identify the IP Core inside the FPGA. The driver structure
+also contains callback methods which get executed on driver probe and
+removal from the system::
+
+ static const struct mcb_device_id foo_ids[] = {
+ { .device = 0x123 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(mcb, foo_ids);
+
+ static struct mcb_driver foo_driver = {
+ .driver = {
+ .name = "foo-bar",
+ .owner = THIS_MODULE,
+ },
+ .probe = foo_probe,
+ .remove = foo_remove,
+ .id_table = foo_ids,
+ };
+
+Probing and attaching
---------------------
- Each MCB device has exactly one memory resource, which can be requested from
- the MCB bus. This memory resource is the physical address of the MCB device
- inside the carrier and is intended to be passed to ioremap() and friends. It
- is already requested from the kernel by calling request_mem_region().
-
-3.2 IRQs
----------
- Each MCB device has exactly one IRQ resource, which can be requested from the
- MCB bus. If a carrier device driver implements the ->get_irq() callback
- method, the IRQ number assigned by the carrier device will be returned,
- otherwise the IRQ number inside the Chameleon table will be returned. This
- number is suitable to be passed to request_irq().
-
-4 Writing an MCB driver
-=======================
-
-4.1 The driver structure
--------------------------
- Each MCB driver has a structure to identify the device driver as well as
- device ids which identify the IP Core inside the FPGA. The driver structure
- also contains callback methods which get executed on driver probe and
- removal from the system.
-
-
- static const struct mcb_device_id foo_ids[] = {
- { .device = 0x123 },
- { }
- };
- MODULE_DEVICE_TABLE(mcb, foo_ids);
-
- static struct mcb_driver foo_driver = {
- driver = {
- .name = "foo-bar",
- .owner = THIS_MODULE,
- },
- .probe = foo_probe,
- .remove = foo_remove,
- .id_table = foo_ids,
- };
-
-4.2 Probing and attaching
---------------------------
- When a driver is loaded and the MCB devices it services are found, the MCB
- core will call the driver's probe callback method. When the driver is removed
- from the system, the MCB core will call the driver's remove callback method.
-
-
- static init foo_probe(struct mcb_device *mdev, const struct mcb_device_id *id);
- static void foo_remove(struct mcb_device *mdev);
-
-4.3 Initializing the driver
-----------------------------
- When the kernel is booted or your foo driver module is inserted, you have to
- perform driver initialization. Usually it is enough to register your driver
- module at the MCB core.
-
-
- static int __init foo_init(void)
- {
- return mcb_register_driver(&foo_driver);
- }
- module_init(foo_init);
-
- static void __exit foo_exit(void)
- {
- mcb_unregister_driver(&foo_driver);
- }
- module_exit(foo_exit);
-
- The module_mcb_driver() macro can be used to reduce the above code.
-
-
- module_mcb_driver(foo_driver);
+
+When a driver is loaded and the MCB devices it services are found, the MCB
+core will call the driver's probe callback method. When the driver is removed
+from the system, the MCB core will call the driver's remove callback method::
+
+ static int foo_probe(struct mcb_device *mdev, const struct mcb_device_id *id);
+ static void foo_remove(struct mcb_device *mdev);
+
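+A probe implementation will typically request the device's memory window
+and IRQ from the MCB core before setting up the hardware. A minimal sketch,
+assuming the mcb_request_mem() and mcb_get_irq() helpers provided by the
+MCB core (names and error unwinding simplified for illustration)::
+
+	static int foo_probe(struct mcb_device *mdev,
+			     const struct mcb_device_id *id)
+	{
+		struct resource *mem;
+		void __iomem *base;
+		int irq;
+
+		mem = mcb_request_mem(mdev, "foo-bar");
+		if (IS_ERR(mem))
+			return PTR_ERR(mem);
+
+		base = ioremap(mem->start, resource_size(mem));
+		if (!base)
+			return -ENOMEM;
+
+		irq = mcb_get_irq(mdev);
+		/* ... request_irq(), hardware setup, save driver state ... */
+
+		return 0;
+	}
+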
+Initializing the driver
+-----------------------
+
+When the kernel is booted or your foo driver module is inserted, you have to
+perform driver initialization. Usually it is enough to register your driver
+module at the MCB core::
+
+ static int __init foo_init(void)
+ {
+ return mcb_register_driver(&foo_driver);
+ }
+ module_init(foo_init);
+
+ static void __exit foo_exit(void)
+ {
+ mcb_unregister_driver(&foo_driver);
+ }
+ module_exit(foo_exit);
+
+The module_mcb_driver() macro can be used to reduce the above code::
+
+ module_mcb_driver(foo_driver);
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index ae57b9ea0d41..69556f0d494b 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -1,6 +1,6 @@
- =============================
- NO-MMU MEMORY MAPPING SUPPORT
- =============================
+=============================
+No-MMU memory mapping support
+=============================
The kernel has limited support for memory mapping under no-MMU conditions, such
as are used in uClinux environments. From the userspace point of view, memory
@@ -16,7 +16,7 @@ the CLONE_VM flag.
The behaviour is similar between the MMU and no-MMU cases, but not identical;
and it's also much more restricted in the latter case:
- (*) Anonymous mapping, MAP_PRIVATE
+ (#) Anonymous mapping, MAP_PRIVATE
In the MMU case: VM regions backed by arbitrary pages; copy-on-write
across fork.
@@ -24,14 +24,14 @@ and it's also much more restricted in the latter case:
In the no-MMU case: VM regions backed by arbitrary contiguous runs of
pages.
- (*) Anonymous mapping, MAP_SHARED
+ (#) Anonymous mapping, MAP_SHARED
These behave very much like private mappings, except that they're
shared across fork() or clone() without CLONE_VM in the MMU case. Since
the no-MMU case doesn't support these, behaviour is identical to
MAP_PRIVATE there.
- (*) File, MAP_PRIVATE, PROT_READ / PROT_EXEC, !PROT_WRITE
+ (#) File, MAP_PRIVATE, PROT_READ / PROT_EXEC, !PROT_WRITE
In the MMU case: VM regions backed by pages read from file; changes to
the underlying file are reflected in the mapping; copied across fork.
@@ -56,7 +56,7 @@ and it's also much more restricted in the latter case:
are visible in other processes (no MMU protection), but should not
happen.
- (*) File, MAP_PRIVATE, PROT_READ / PROT_EXEC, PROT_WRITE
+ (#) File, MAP_PRIVATE, PROT_READ / PROT_EXEC, PROT_WRITE
In the MMU case: like the non-PROT_WRITE case, except that the pages in
question get copied before the write actually happens. From that point
@@ -66,7 +66,7 @@ and it's also much more restricted in the latter case:
In the no-MMU case: works much like the non-PROT_WRITE case, except
that a copy is always taken and never shared.
- (*) Regular file / blockdev, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
+ (#) Regular file / blockdev, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
In the MMU case: VM regions backed by pages read from file; changes to
pages written back to file; writes to file reflected into pages backing
@@ -74,7 +74,7 @@ and it's also much more restricted in the latter case:
In the no-MMU case: not supported.
- (*) Memory backed regular file, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
+ (#) Memory backed regular file, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
In the MMU case: As for ordinary regular files.
@@ -85,7 +85,7 @@ and it's also much more restricted in the latter case:
as for the MMU case. If the filesystem does not provide any such
support, then the mapping request will be denied.
- (*) Memory backed blockdev, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
+ (#) Memory backed blockdev, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
In the MMU case: As for ordinary regular files.
@@ -94,7 +94,7 @@ and it's also much more restricted in the latter case:
truncate being called. The ramdisk driver could do this if it allocated
all its memory as a contiguous array upfront.
- (*) Memory backed chardev, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
+ (#) Memory backed chardev, MAP_SHARED, PROT_READ / PROT_EXEC / PROT_WRITE
In the MMU case: As for ordinary regular files.
@@ -105,21 +105,20 @@ and it's also much more restricted in the latter case:
provide any such support, then the mapping request will be denied.
-============================
-FURTHER NOTES ON NO-MMU MMAP
+Further notes on no-MMU MMAP
============================
- (*) A request for a private mapping of a file may return a buffer that is not
+ (#) A request for a private mapping of a file may return a buffer that is not
page-aligned. This is because XIP may take place, and the data may not be
page-aligned in the backing store.
- (*) A request for an anonymous mapping will always be page aligned. If
+ (#) A request for an anonymous mapping will always be page aligned. If
possible, the size of the request should be a power of two, otherwise some
of the space may be wasted as the kernel must allocate a power-of-2
granule but will only discard the excess if appropriately configured, as
this has an effect on fragmentation.
- (*) The memory allocated by a request for an anonymous mapping will normally
+ (#) The memory allocated by a request for an anonymous mapping will normally
be cleared by the kernel before being returned in accordance with the
Linux man pages (ver 2.22 or later).
@@ -145,24 +144,23 @@ FURTHER NOTES ON NO-MMU MMAP
uClibc uses this to speed up malloc(), and the ELF-FDPIC binfmt uses this
to allocate the brk and stack region.
- (*) A list of all the private copy and anonymous mappings on the system is
+ (#) A list of all the private copy and anonymous mappings on the system is
visible through /proc/maps in no-MMU mode.
- (*) A list of all the mappings in use by a process is visible through
+ (#) A list of all the mappings in use by a process is visible through
/proc/<pid>/maps in no-MMU mode.
- (*) Supplying MAP_FIXED or a requesting a particular mapping address will
+ (#) Supplying MAP_FIXED or a requesting a particular mapping address will
result in an error.
- (*) Files mapped privately usually have to have a read method provided by the
+ (#) Files mapped privately usually have to have a read method provided by the
driver or filesystem so that the contents can be read into the memory
allocated if mmap() chooses not to map the backing device directly. An
error will result if they don't. This is most likely to be encountered
with character device files, pipes, fifos and sockets.
-==========================
-INTERPROCESS SHARED MEMORY
+Interprocess shared memory
==========================
Both SYSV IPC SHM shared memory and POSIX shared memory are supported in NOMMU
@@ -170,8 +168,7 @@ mode. The former through the usual mechanism, the latter through files created
on ramfs or tmpfs mounts.
-=======
-FUTEXES
+Futexes
=======
Futexes are supported in NOMMU mode if the arch supports them. An error will
@@ -180,12 +177,11 @@ mappings made by a process or if the mapping in which the address lies does not
support futexes (such as an I/O chardev mapping).
-=============
-NO-MMU MREMAP
+No-MMU mremap
=============
The mremap() function is partially supported. It may change the size of a
-mapping, and may move it[*] if MREMAP_MAYMOVE is specified and if the new size
+mapping, and may move it [#]_ if MREMAP_MAYMOVE is specified and if the new size
of the mapping exceeds the size of the slab object currently occupied by the
memory to which the mapping refers, or if a smaller slab object could be used.
@@ -200,11 +196,10 @@ a previously mapped object. It may not be used to create holes in existing
mappings, move parts of existing mappings or resize parts of mappings. It must
act on a complete mapping.
-[*] Not currently supported.
+.. [#] Not currently supported.
-============================================
-PROVIDING SHAREABLE CHARACTER DEVICE SUPPORT
+Providing shareable character device support
============================================
To provide shareable character device support, a driver must provide a
@@ -235,7 +230,7 @@ direct the call to the device-specific driver. Under such circumstances, the
mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a
copy mapped otherwise.
-IMPORTANT NOTE:
+.. important::
Some types of device may present a different appearance to anyone
looking at them in certain modes. Flash chips can be like this; for
@@ -249,8 +244,7 @@ IMPORTANT NOTE:
circumstances!
-==============================================
-PROVIDING SHAREABLE MEMORY-BACKED FILE SUPPORT
+Providing shareable memory-backed file support
==============================================
Provision of shared mappings on memory backed files is similar to the provision
@@ -267,8 +261,7 @@ Memory backed devices are indicated by the mapping's backing device info having
the memory_backed flag set.
-========================================
-PROVIDING SHAREABLE BLOCK DEVICE SUPPORT
+Providing shareable block device support
========================================
Provision of shared mappings on block device files is exactly the same as for
@@ -276,8 +269,7 @@ character devices. If there isn't a real device underneath, then the driver
should allocate sufficient contiguous memory to honour any supported mapping.
-=================================
-ADJUSTING PAGE TRIMMING BEHAVIOUR
+Adjusting page trimming behaviour
=================================
NOMMU mmap automatically rounds up to the nearest power-of-2 number of pages
@@ -288,4 +280,4 @@ allocator. In order to retain finer-grained control over fragmentation, this
behaviour can either be disabled completely, or bumped up to a higher page
watermark where trimming begins.
-Page trimming behaviour is configurable via the sysctl `vm.nr_trim_pages'.
+Page trimming behaviour is configurable via the sysctl ``vm.nr_trim_pages``.
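+
+For example, setting ``vm.nr_trim_pages`` to 0 disables trimming of
+allocations entirely, while the default value of 1 trims excess pages
+aggressively.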
diff --git a/Documentation/ntb.txt b/Documentation/ntb.txt
index 1d9bbabb6c79..a043854d28df 100644
--- a/Documentation/ntb.txt
+++ b/Documentation/ntb.txt
@@ -1,16 +1,21 @@
-# NTB Drivers
+===========
+NTB Drivers
+===========
NTB (Non-Transparent Bridge) is a type of PCI-Express bridge chip that connects
-the separate memory systems of two computers to the same PCI-Express fabric.
-Existing NTB hardware supports a common feature set, including scratchpad
-registers, doorbell registers, and memory translation windows. Scratchpad
-registers are read-and-writable registers that are accessible from either side
-of the device, so that peers can exchange a small amount of information at a
-fixed address. Doorbell registers provide a way for peers to send interrupt
-events. Memory windows allow translated read and write access to the peer
-memory.
-
-## NTB Core Driver (ntb)
+the separate memory systems of two or more computers to the same PCI-Express
+fabric. Existing NTB hardware supports a common feature set: doorbell
+registers and memory translation windows, as well as non common features like
+scratchpad and message registers. Scratchpad registers are read-and-writable
+registers that are accessible from either side of the device, so that peers can
+exchange a small amount of information at a fixed address. Message registers can
+be utilized for the same purpose. Additionally, they are provided with
+special status bits to make sure the information isn't overwritten by another
+peer. Doorbell registers provide a way for peers to send interrupt events.
+Memory windows allow translated read and write access to the peer memory.
+
+NTB Core Driver (ntb)
+=====================
The NTB core driver defines an API wrapping the common feature set, and allows
clients interested in NTB features to discover the NTB devices supported by
@@ -18,7 +23,8 @@ hardware drivers. The term "client" is used here to mean an upper layer
component making use of the NTB API. The term "driver," or "hardware driver,"
is used here to mean a driver for a specific vendor and model of NTB hardware.
-## NTB Client Drivers
+NTB Client Drivers
+==================
NTB client drivers should register with the NTB core driver. After
registering, the client probe and remove functions will be called appropriately
@@ -26,7 +32,90 @@ as ntb hardware, or hardware drivers, are inserted and removed. The
registration uses the Linux Device framework, so it should feel familiar to
anyone who has written a pci driver.
-### NTB Transport Client (ntb\_transport) and NTB Netdev (ntb\_netdev)
+Typical NTB client driver implementation
+----------------------------------------
+
+The primary purpose of NTB is to share a piece of memory between at least two
+systems. So the NTB device features like Scratchpad/Message registers are
+mainly used to perform the proper memory window initialization. Typically
+there are two types of memory window interfaces supported by the NTB API:
+inbound translation configured on the local ntb port and outbound translation
+configured by the peer, on the peer ntb port. The first type is
+depicted in the following figure:
+
+Inbound translation::
+ Memory: Local NTB Port: Peer NTB Port: Peer MMIO:
+ ____________
+ | dma-mapped |-ntb_mw_set_trans(addr) |
+ | memory | _v____________ | ______________
+ | (addr) |<======| MW xlat addr |<====| MW base addr |<== memory-mapped IO
+ |------------| |--------------| | |--------------|
+
+A typical scenario for initializing the first type of memory window looks
+like this: 1) allocate a memory region, 2) put the translated address into
+the NTB configuration, 3) somehow notify the peer device of the performed
+initialization, 4) the peer device maps the corresponding outbound memory
+window to get access to the shared memory region.
+
+The second type of interface, which implies the shared windows being
+initialized by a peer device, is depicted in the following figure:
+
+Outbound translation::
+ Memory: Local NTB Port: Peer NTB Port: Peer MMIO:
+ ____________ ______________
+ | dma-mapped | | | MW base addr |<== memory-mapped IO
+ | memory | | |--------------|
+ | (addr) |<===================| MW xlat addr |<-ntb_peer_mw_set_trans(addr)
+ |------------| | |--------------|
+
+A typical scenario for initializing the second type of interface would be:
+1) allocate a memory region, 2) somehow deliver the translated address to the
+peer device, 3) the peer puts the translated address into the NTB
+configuration, 4) the peer device maps the outbound memory window to get
+access to the shared memory region.
+
+As one can see, the described scenarios can be combined into one portable
+algorithm:
+ Local device:
+ 1) Allocate memory for a shared window
+ 2) Initialize the memory window with the translated address of the
+ allocated region (it may fail if local memory window initialization
+ is unsupported)
+ 3) Send the translated address and memory window index to a peer device
+ Peer device:
+ 1) Initialize the memory window with the retrieved address of the
+ memory region allocated by the other device (it may fail if peer
+ memory window initialization is unsupported)
+ 2) Map outbound memory window
+
+In accordance with this scenario, the NTB Memory Window API can be used as
+follows:
+ Local device:
+ 1) ntb_mw_count(pidx) - retrieve the number of memory ranges which can
+ be allocated for memory windows between the local device and the peer
+ device with the specified port index.
+ 2) ntb_get_align(pidx, midx) - retrieve parameters restricting the
+ shared memory region alignment and size. Then memory can be properly
+ allocated.
+ 3) Allocate physically contiguous memory region in compliance with
+ restrictions retrieved in 2).
+ 4) ntb_mw_set_trans(pidx, midx) - try to set translation address of
+ the memory window with specified index for the defined peer device
+ (it may fail if local translated address setting is not supported)
+ 5) Send translated base address (usually together with memory window
+ number) to the peer device using, for instance, scratchpad or message
+ registers.
+ Peer device:
+ 1) ntb_peer_mw_set_trans(pidx, midx) - try to set the translated address
+ received from the other device (related to pidx) for the specified
+ memory window. It may fail if the retrieved address, for instance,
+ exceeds the maximum possible address or isn't properly aligned.
+ 2) ntb_peer_mw_get_addr(widx) - retrieve the MMIO address to map the
+ memory window, so as to get access to the shared memory.
+
+It is also worth noting that the method ntb_mw_count(pidx) should return the
+same value as ntb_peer_mw_count() on the peer with port index pidx.
+
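+As a condensed sketch of the local-device side of the algorithm above
+(error handling and the notification step are omitted; this assumes the
+ntb_mw_get_align()/ntb_mw_set_trans() helpers of include/linux/ntb.h, with
+ntb, pidx and midx already in scope)::
+
+	resource_size_t addr_align, size_align, size_max;
+	dma_addr_t dma_addr;
+	void *buf;
+
+	/* 2) restrictions for memory window midx towards peer port pidx */
+	ntb_mw_get_align(ntb, pidx, midx, &addr_align, &size_align,
+			 &size_max);
+
+	/* 3) physically contiguous buffer satisfying the restrictions */
+	buf = dma_alloc_coherent(&ntb->pdev->dev, size_max, &dma_addr,
+				 GFP_KERNEL);
+
+	/* 4) point the inbound window at the buffer (may be unsupported) */
+	ntb_mw_set_trans(ntb, pidx, midx, dma_addr, size_max);
+
+	/* 5) deliver dma_addr and the window index to the peer, e.g. via
+	 *    scratchpad or message registers
+	 */
+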
+NTB Transport Client (ntb\_transport) and NTB Netdev (ntb\_netdev)
+------------------------------------------------------------------
The primary client for NTB is the Transport client, used in tandem with NTB
Netdev. These drivers function together to create a logical link to the peer,
@@ -37,7 +126,8 @@ Transport queue pair. Network data is copied between socket buffers and the
Transport queue pair buffer. The Transport client may be used for other things
besides Netdev, however no other applications have yet been written.
-### NTB Ping Pong Test Client (ntb\_pingpong)
+NTB Ping Pong Test Client (ntb\_pingpong)
+-----------------------------------------
The Ping Pong test client serves as a demonstration to exercise the doorbell
and scratchpad registers of NTB hardware, and as an example simple NTB client.
@@ -64,7 +154,8 @@ Module Parameters:
* dyndbg - It is suggested to specify dyndbg=+p when loading this module, and
then to observe debugging output on the console.
-### NTB Tool Test Client (ntb\_tool)
+NTB Tool Test Client (ntb\_tool)
+--------------------------------
The Tool test client serves for debugging, primarily, ntb hardware and drivers.
The Tool provides access through debugfs for reading, setting, and clearing the
@@ -74,48 +165,60 @@ The Tool does not currently have any module parameters.
Debugfs Files:
-* *debugfs*/ntb\_tool/*hw*/ - A directory in debugfs will be created for each
+* *debugfs*/ntb\_tool/*hw*/
+ A directory in debugfs will be created for each
NTB device probed by the tool. This directory is shortened to *hw*
below.
-* *hw*/db - This file is used to read, set, and clear the local doorbell. Not
+* *hw*/db
+ This file is used to read, set, and clear the local doorbell. Not
all operations may be supported by all hardware. To read the doorbell,
read the file. To set the doorbell, write `s` followed by the bits to
set (eg: `echo 's 0x0101' > db`). To clear the doorbell, write `c`
followed by the bits to clear.
-* *hw*/mask - This file is used to read, set, and clear the local doorbell mask.
+* *hw*/mask
+ This file is used to read, set, and clear the local doorbell mask.
See *db* for details.
-* *hw*/peer\_db - This file is used to read, set, and clear the peer doorbell.
+* *hw*/peer\_db
+ This file is used to read, set, and clear the peer doorbell.
See *db* for details.
-* *hw*/peer\_mask - This file is used to read, set, and clear the peer doorbell
+* *hw*/peer\_mask
+ This file is used to read, set, and clear the peer doorbell
mask. See *db* for details.
-* *hw*/spad - This file is used to read and write local scratchpads. To read
+* *hw*/spad
+ This file is used to read and write local scratchpads. To read
the values of all scratchpads, read the file. To write values, write a
series of pairs of scratchpad number and value
(eg: `echo '4 0x123 7 0xabc' > spad`
# to set scratchpads `4` and `7` to `0x123` and `0xabc`, respectively).
-* *hw*/peer\_spad - This file is used to read and write peer scratchpads. See
+* *hw*/peer\_spad
+ This file is used to read and write peer scratchpads. See
*spad* for details.
-## NTB Hardware Drivers
+NTB Hardware Drivers
+====================
NTB hardware drivers should register devices with the NTB core driver. After
registering, clients' probe and remove functions will be called.
-### NTB Intel Hardware Driver (ntb\_hw\_intel)
+NTB Intel Hardware Driver (ntb\_hw\_intel)
+------------------------------------------
The Intel hardware driver supports NTB on Xeon and Atom CPUs.
Module Parameters:
-* b2b\_mw\_idx - If the peer ntb is to be accessed via a memory window, then use
+* b2b\_mw\_idx
+ If the peer ntb is to be accessed via a memory window, then use
this memory window to access the peer ntb. A value of zero or positive
starts from the first mw idx, and a negative value starts from the last
mw idx. Both sides MUST set the same value here! The default value is
`-1`.
-* b2b\_mw\_share - If the peer ntb is to be accessed via a memory window, and if
+* b2b\_mw\_share
+ If the peer ntb is to be accessed via a memory window, and if
the memory window is large enough, still allow the client to use the
second half of the memory window for address translation to the peer.
-* xeon\_b2b\_usd\_bar2\_addr64 - If using B2B topology on Xeon hardware, use
+* xeon\_b2b\_usd\_bar2\_addr64
+ If using B2B topology on Xeon hardware, use
this 64 bit address on the bus between the NTB devices for the window
at BAR2, on the upstream side of the link.
* xeon\_b2b\_usd\_bar4\_addr64 - See *xeon\_b2b\_bar2\_addr64*.
diff --git a/Documentation/numastat.txt b/Documentation/numastat.txt
index 520327790d54..aaf1667489f8 100644
--- a/Documentation/numastat.txt
+++ b/Documentation/numastat.txt
@@ -1,10 +1,12 @@
-
+===============================
Numa policy hit/miss statistics
+===============================
/sys/devices/system/node/node*/numastat
All units are pages. Hugepages have separate counters.
+=============== ============================================================
numa_hit A process wanted to allocate memory from this node,
and succeeded.
@@ -20,6 +22,7 @@ other_node A process ran on this node and got memory from another node.
interleave_hit Interleaving wanted to allocate from this node
and succeeded.
+=============== ============================================================
For easier reading you can use the numastat utility from the numactl package
(http://oss.sgi.com/projects/libnuma/). Note that it only works
diff --git a/Documentation/padata.txt b/Documentation/padata.txt
index 7ddfe216a0aa..b103d0c82000 100644
--- a/Documentation/padata.txt
+++ b/Documentation/padata.txt
@@ -1,5 +1,8 @@
+=======================================
The padata parallel execution mechanism
-Last updated for 2.6.36
+=======================================
+
+:Last updated: for 2.6.36
Padata is a mechanism by which the kernel can farm work out to be done in
parallel on multiple CPUs while retaining the ordering of tasks. It was
@@ -9,7 +12,7 @@ those packets. The crypto developers made a point of writing padata in a
sufficiently general fashion that it could be put to other uses as well.
The first step in using padata is to set up a padata_instance structure for
-overall control of how tasks are to be run:
+overall control of how tasks are to be run::
#include <linux/padata.h>
@@ -24,7 +27,7 @@ The workqueue wq is where the work will actually be done; it should be
a multithreaded queue, naturally.
To allocate a padata instance with the cpu_possible_mask for both
-cpumasks this helper function can be used:
+cpumasks this helper function can be used::
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq);
@@ -36,7 +39,7 @@ it is legal to supply a cpumask to padata that contains offline CPUs.
Once an offline CPU in the user supplied cpumask comes online, padata
is going to use it.
-There are functions for enabling and disabling the instance:
+There are functions for enabling and disabling the instance::
int padata_start(struct padata_instance *pinst);
void padata_stop(struct padata_instance *pinst);
@@ -48,7 +51,7 @@ padata cpumask contains no active CPU (flag not set).
padata_stop clears the flag and blocks until the padata instance
is unused.
-The list of CPUs to be used can be adjusted with these functions:
+The list of CPUs to be used can be adjusted with these functions::
int padata_set_cpumasks(struct padata_instance *pinst,
cpumask_var_t pcpumask,
@@ -71,12 +74,12 @@ padata_add_cpu/padata_remove_cpu are used. cpu specifies the CPU to add or
remove and mask is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL.
If a user is interested in padata cpumask changes, he can register to
-the padata cpumask change notifier:
+the padata cpumask change notifier::
int padata_register_cpumask_notifier(struct padata_instance *pinst,
struct notifier_block *nblock);
-To unregister from that notifier:
+To unregister from that notifier::
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
struct notifier_block *nblock);
@@ -84,7 +87,7 @@ To unregister from that notifier:
The padata cpumask change notifier notifies about changes of the usable
cpumasks, i.e. the subset of active CPUs in the user supplied cpumask.
-Padata calls the notifier chain with:
+Padata calls the notifier chain with::
blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
notification_mask,
@@ -95,7 +98,7 @@ is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL and cpumask is a pointer
to a struct padata_cpumask that contains the new cpumask information.
Actually submitting work to the padata instance requires the creation of a
-padata_priv structure:
+padata_priv structure::
struct padata_priv {
/* Other stuff here... */
@@ -110,7 +113,7 @@ parallel() and serial() functions should be provided. Those functions will
be called in the process of getting the work done as we will see
momentarily.
-The submission of work is done with:
+The submission of work is done with::
int padata_do_parallel(struct padata_instance *pinst,
struct padata_priv *padata, int cb_cpu);
@@ -138,7 +141,7 @@ need not be completed during this call, but, if parallel() leaves work
outstanding, it should be prepared to be called again with a new job before
the previous one completes. When a task does complete, parallel() (or
whatever function actually finishes the job) should inform padata of the
-fact with a call to:
+fact with a call to::
void padata_do_serial(struct padata_priv *padata);
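+
+As a minimal sketch of how a job might embed padata_priv (the foo_* names
+are illustrative, not part of the padata API)::
+
+	struct foo_job {
+		struct padata_priv padata;
+		/* job-specific data ... */
+	};
+
+	static void foo_parallel(struct padata_priv *padata)
+	{
+		struct foo_job *job =
+			container_of(padata, struct foo_job, padata);
+
+		/* ... do the parallelizable part of the work on job ... */
+
+		padata_do_serial(padata);
+	}
+
+	static void foo_serial(struct padata_priv *padata)
+	{
+		/* Runs in submission order; complete the job here. */
+	}
+
+Before submission, the job would set padata.parallel = foo_parallel and
+padata.serial = foo_serial, then call padata_do_parallel() as described
+above.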
@@ -151,7 +154,7 @@ pains to ensure that tasks are completed in the order in which they were
submitted.
The one remaining function in the padata API should be called to clean up
-when a padata instance is no longer needed:
+when a padata instance is no longer needed::
void padata_free(struct padata_instance *pinst);
diff --git a/Documentation/parport-lowlevel.txt b/Documentation/parport-lowlevel.txt
index 120eb20dbb09..0633d70ffda7 100644
--- a/Documentation/parport-lowlevel.txt
+++ b/Documentation/parport-lowlevel.txt
@@ -1,11 +1,12 @@
+===============================
PARPORT interface documentation
--------------------------------
+===============================
-Time-stamp: <2000-02-24 13:30:20 twaugh>
+:Time-stamp: <2000-02-24 13:30:20 twaugh>
Described here are the following functions:
-Global functions:
+Global functions::
parport_register_driver
parport_unregister_driver
parport_enumerate
@@ -31,7 +32,8 @@ Global functions:
parport_set_timeout
Port functions (can be overridden by low-level drivers):
- SPP:
+
+ SPP::
port->ops->read_data
port->ops->write_data
port->ops->read_status
@@ -43,23 +45,23 @@ Port functions (can be overridden by low-level drivers):
port->ops->data_forward
port->ops->data_reverse
- EPP:
+ EPP::
port->ops->epp_write_data
port->ops->epp_read_data
port->ops->epp_write_addr
port->ops->epp_read_addr
- ECP:
+ ECP::
port->ops->ecp_write_data
port->ops->ecp_read_data
port->ops->ecp_write_addr
- Other:
+ Other::
port->ops->nibble_read_data
port->ops->byte_read_data
port->ops->compat_write_data
-The parport subsystem comprises 'parport' (the core port-sharing
+The parport subsystem comprises ``parport`` (the core port-sharing
code), and a variety of low-level drivers that actually do the port
accesses. Each low-level driver handles a particular style of port
(PC, Amiga, and so on).
@@ -70,14 +72,14 @@ into global functions and port functions.
The global functions are mostly for communicating between the device
driver and the parport subsystem: acquiring a list of available ports,
claiming a port for exclusive use, and so on. They also include
-'generic' functions for doing standard things that will work on any
+``generic`` functions for doing standard things that will work on any
IEEE 1284-capable architecture.
The port functions are provided by the low-level drivers, although the
-core parport module provides generic 'defaults' for some routines.
+core parport module provides generic ``defaults`` for some routines.
The port functions can be split into three groups: SPP, EPP, and ECP.
-SPP (Standard Parallel Port) functions modify so-called 'SPP'
+SPP (Standard Parallel Port) functions modify so-called ``SPP``
registers: data, status, and control. The hardware may not actually
have registers exactly like that, but the PC does and this interface is
modelled after common PC implementations. Other low-level drivers may
@@ -95,58 +97,63 @@ to cope with peripherals that only tenuously support IEEE 1284, a
low-level driver specific function is provided, for altering 'fudge
factors'.
-GLOBAL FUNCTIONS
-----------------
+Global functions
+================
parport_register_driver - register a device driver with parport
------------------------
+---------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-struct parport_driver {
- const char *name;
- void (*attach) (struct parport *);
- void (*detach) (struct parport *);
- struct parport_driver *next;
-};
-int parport_register_driver (struct parport_driver *driver);
+ struct parport_driver {
+ const char *name;
+ void (*attach) (struct parport *);
+ void (*detach) (struct parport *);
+ struct parport_driver *next;
+ };
+ int parport_register_driver (struct parport_driver *driver);
DESCRIPTION
+^^^^^^^^^^^
In order to be notified about parallel ports when they are detected,
parport_register_driver should be called. Your driver will
immediately be notified of all ports that have already been detected,
and of each new port as low-level drivers are loaded.
-A 'struct parport_driver' contains the textual name of your driver,
+A ``struct parport_driver`` contains the textual name of your driver,
a pointer to a function to handle new ports, and a pointer to a
function to handle ports going away due to a low-level driver
unloading. Ports will only be detached if they are not being used
(i.e. there are no devices registered on them).
-The visible parts of the 'struct parport *' argument given to
-attach/detach are:
-
-struct parport
-{
- struct parport *next; /* next parport in list */
- const char *name; /* port's name */
- unsigned int modes; /* bitfield of hardware modes */
- struct parport_device_info probe_info;
- /* IEEE1284 info */
- int number; /* parport index */
- struct parport_operations *ops;
- ...
-};
+The visible parts of the ``struct parport *`` argument given to
+attach/detach are::
+
+ struct parport
+ {
+ struct parport *next; /* next parport in list */
+ const char *name; /* port's name */
+ unsigned int modes; /* bitfield of hardware modes */
+ struct parport_device_info probe_info;
+ /* IEEE1284 info */
+ int number; /* parport index */
+ struct parport_operations *ops;
+ ...
+ };
There are other members of the structure, but they should not be
touched.
-The 'modes' member summarises the capabilities of the underlying
+The ``modes`` member summarises the capabilities of the underlying
hardware. It consists of flags which may be bitwise-ored together:
+ ============================= ===============================================
PARPORT_MODE_PCSPP IBM PC registers are available,
i.e. functions that act on data,
control and status registers are
@@ -169,297 +176,351 @@ hardware. It consists of flags which may be bitwise-ored together:
GFP_DMA flag with kmalloc) to the
low-level driver in order to take
advantage of it.
+ ============================= ===============================================
-There may be other flags in 'modes' as well.
+There may be other flags in ``modes`` as well.
-The contents of 'modes' is advisory only. For example, if the
-hardware is capable of DMA, and PARPORT_MODE_DMA is in 'modes', it
+The contents of ``modes`` is advisory only. For example, if the
+hardware is capable of DMA, and PARPORT_MODE_DMA is in ``modes``, it
doesn't necessarily mean that DMA will always be used when possible.
Similarly, hardware that is capable of assisting ECP transfers won't
necessarily be used.
RETURN VALUE
+^^^^^^^^^^^^
Zero on success, otherwise an error code.
ERRORS
+^^^^^^
None. (Can it fail? Why return int?)
EXAMPLE
+^^^^^^^
-static void lp_attach (struct parport *port)
-{
- ...
- private = kmalloc (...);
- dev[count++] = parport_register_device (...);
- ...
-}
+::
-static void lp_detach (struct parport *port)
-{
- ...
-}
+ static void lp_attach (struct parport *port)
+ {
+ ...
+ private = kmalloc (...);
+ dev[count++] = parport_register_device (...);
+ ...
+ }
-static struct parport_driver lp_driver = {
- "lp",
- lp_attach,
- lp_detach,
- NULL /* always put NULL here */
-};
+ static void lp_detach (struct parport *port)
+ {
+ ...
+ }
-int lp_init (void)
-{
- ...
- if (parport_register_driver (&lp_driver)) {
- /* Failed; nothing we can do. */
- return -EIO;
+ static struct parport_driver lp_driver = {
+ "lp",
+ lp_attach,
+ lp_detach,
+ NULL /* always put NULL here */
+ };
+
+ int lp_init (void)
+ {
+ ...
+ if (parport_register_driver (&lp_driver)) {
+ /* Failed; nothing we can do. */
+ return -EIO;
+ }
+ ...
}
- ...
-}
+
SEE ALSO
+^^^^^^^^
parport_unregister_driver, parport_register_device, parport_enumerate
-
+
+
+
parport_unregister_driver - tell parport to forget about this driver
--------------------------
+--------------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_driver {
- const char *name;
- void (*attach) (struct parport *);
- void (*detach) (struct parport *);
- struct parport_driver *next;
-};
-void parport_unregister_driver (struct parport_driver *driver);
+ #include <linux/parport.h>
+
+ struct parport_driver {
+ const char *name;
+ void (*attach) (struct parport *);
+ void (*detach) (struct parport *);
+ struct parport_driver *next;
+ };
+ void parport_unregister_driver (struct parport_driver *driver);
DESCRIPTION
+^^^^^^^^^^^
This tells parport not to notify the device driver of new ports or of
ports going away. Registered devices belonging to that driver are NOT
unregistered: parport_unregister_device must be used for each one.
EXAMPLE
+^^^^^^^
-void cleanup_module (void)
-{
- ...
- /* Stop notifications. */
- parport_unregister_driver (&lp_driver);
+::
- /* Unregister devices. */
- for (i = 0; i < NUM_DEVS; i++)
- parport_unregister_device (dev[i]);
- ...
-}
+ void cleanup_module (void)
+ {
+ ...
+ /* Stop notifications. */
+ parport_unregister_driver (&lp_driver);
+
+ /* Unregister devices. */
+ for (i = 0; i < NUM_DEVS; i++)
+ parport_unregister_device (dev[i]);
+ ...
+ }
SEE ALSO
+^^^^^^^^
parport_register_driver, parport_enumerate
-
+
+
+
parport_enumerate - retrieve a list of parallel ports (DEPRECATED)
------------------
+------------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport *parport_enumerate (void);
+ #include <linux/parport.h>
+
+ struct parport *parport_enumerate (void);
DESCRIPTION
+^^^^^^^^^^^
Retrieve the first of a list of valid parallel ports for this machine.
-Successive parallel ports can be found using the 'struct parport
-*next' element of the 'struct parport *' that is returned. If 'next'
+Successive parallel ports can be found using the ``struct parport
+*next`` element of the ``struct parport *`` that is returned. If ``next``
is NULL, there are no more parallel ports in the list. The number of
ports in the list will not exceed PARPORT_MAX.
RETURN VALUE
+^^^^^^^^^^^^
-A 'struct parport *' describing a valid parallel port for the machine,
+A ``struct parport *`` describing a valid parallel port for the machine,
or NULL if there are none.
ERRORS
+^^^^^^
This function can return NULL to indicate that there are no parallel
ports to use.
EXAMPLE
+^^^^^^^
+
+::
-int detect_device (void)
-{
- struct parport *port;
+ int detect_device (void)
+ {
+ struct parport *port;
+
+ for (port = parport_enumerate ();
+ port != NULL;
+ port = port->next) {
+ /* Try to detect a device on the port... */
+ ...
+ }
+ }
- for (port = parport_enumerate ();
- port != NULL;
- port = port->next) {
- /* Try to detect a device on the port... */
...
- }
}
- ...
-}
-
NOTES
+^^^^^
parport_enumerate is deprecated; parport_register_driver should be
used instead.
SEE ALSO
+^^^^^^^^
parport_register_driver, parport_unregister_driver
-
+
+
+
parport_register_device - register to use a port
------------------------
+------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-typedef int (*preempt_func) (void *handle);
-typedef void (*wakeup_func) (void *handle);
-typedef int (*irq_func) (int irq, void *handle, struct pt_regs *);
+ #include <linux/parport.h>
-struct pardevice *parport_register_device(struct parport *port,
- const char *name,
- preempt_func preempt,
- wakeup_func wakeup,
- irq_func irq,
- int flags,
- void *handle);
+ typedef int (*preempt_func) (void *handle);
+ typedef void (*wakeup_func) (void *handle);
+ typedef int (*irq_func) (int irq, void *handle, struct pt_regs *);
+
+ struct pardevice *parport_register_device(struct parport *port,
+ const char *name,
+ preempt_func preempt,
+ wakeup_func wakeup,
+ irq_func irq,
+ int flags,
+ void *handle);
DESCRIPTION
+^^^^^^^^^^^
Use this function to register your device driver on a parallel port
-('port'). Once you have done that, you will be able to use
+(``port``). Once you have done that, you will be able to use
parport_claim and parport_release in order to use the port.
-The ('name') argument is the name of the device that appears in /proc
+The ``name`` argument is the name of the device that appears in the /proc
filesystem. The string must be valid for the whole lifetime of the
device (until parport_unregister_device is called).
This function will register three callbacks into your driver:
-'preempt', 'wakeup' and 'irq'. Each of these may be NULL in order to
+``preempt``, ``wakeup`` and ``irq``. Each of these may be NULL in order to
indicate that you do not want a callback.
-When the 'preempt' function is called, it is because another driver
-wishes to use the parallel port. The 'preempt' function should return
+When the ``preempt`` function is called, it is because another driver
+wishes to use the parallel port. The ``preempt`` function should return
non-zero if the parallel port cannot be released yet -- if zero is
returned, the port is lost to another driver and the port must be
re-claimed before use.
-The 'wakeup' function is called once another driver has released the
+The ``wakeup`` function is called once another driver has released the
port and no other driver has yet claimed it. You can claim the
-parallel port from within the 'wakeup' function (in which case the
+parallel port from within the ``wakeup`` function (in which case the
claim is guaranteed to succeed), or choose not to if you don't need it
now.
If an interrupt occurs on the parallel port your driver has claimed,
-the 'irq' function will be called. (Write something about shared
+the ``irq`` function will be called. (Write something about shared
interrupts here.)
-The 'handle' is a pointer to driver-specific data, and is passed to
+The ``handle`` is a pointer to driver-specific data, and is passed to
the callback functions.
-'flags' may be a bitwise combination of the following flags:
+``flags`` may be a bitwise combination of the following flags:
+ ===================== =================================================
Flag Meaning
+ ===================== =================================================
PARPORT_DEV_EXCL The device cannot share the parallel port at all.
Use this only when absolutely necessary.
+ ===================== =================================================
The typedefs are not actually defined -- they are only shown in order
to make the function prototype more readable.
-The visible parts of the returned 'struct pardevice' are:
+The visible parts of the returned ``struct pardevice`` are::
-struct pardevice {
- struct parport *port; /* Associated port */
- void *private; /* Device driver's 'handle' */
- ...
-};
+ struct pardevice {
+ struct parport *port; /* Associated port */
+ void *private; /* Device driver's 'handle' */
+ ...
+ };
RETURN VALUE
+^^^^^^^^^^^^
-A 'struct pardevice *': a handle to the registered parallel port
+A ``struct pardevice *``: a handle to the registered parallel port
device that can be used for parport_claim, parport_release, etc.
ERRORS
+^^^^^^
A return value of NULL indicates that there was a problem registering
a device on that port.
EXAMPLE
+^^^^^^^
+
+::
+
+ static int preempt (void *handle)
+ {
+ if (busy_right_now)
+ return 1;
+
+ must_reclaim_port = 1;
+ return 0;
+ }
+
+ static void wakeup (void *handle)
+ {
+ struct toaster *private = handle;
+ struct pardevice *dev = private->dev;
+ if (!dev) return; /* avoid races */
+
+ if (want_port)
+ parport_claim (dev);
+ }
+
+ static int toaster_detect (struct toaster *private, struct parport *port)
+ {
+ private->dev = parport_register_device (port, "toaster", preempt,
+ wakeup, NULL, 0,
+ private);
+ if (!private->dev)
+ /* Couldn't register with parport. */
+ return -EIO;
-static int preempt (void *handle)
-{
- if (busy_right_now)
- return 1;
-
- must_reclaim_port = 1;
- return 0;
-}
-
-static void wakeup (void *handle)
-{
- struct toaster *private = handle;
- struct pardevice *dev = private->dev;
- if (!dev) return; /* avoid races */
-
- if (want_port)
- parport_claim (dev);
-}
-
-static int toaster_detect (struct toaster *private, struct parport *port)
-{
- private->dev = parport_register_device (port, "toaster", preempt,
- wakeup, NULL, 0,
- private);
- if (!private->dev)
- /* Couldn't register with parport. */
- return -EIO;
-
- must_reclaim_port = 0;
- busy_right_now = 1;
- parport_claim_or_block (private->dev);
- ...
- /* Don't need the port while the toaster warms up. */
- busy_right_now = 0;
- ...
- busy_right_now = 1;
- if (must_reclaim_port) {
- parport_claim_or_block (private->dev);
must_reclaim_port = 0;
+ busy_right_now = 1;
+ parport_claim_or_block (private->dev);
+ ...
+ /* Don't need the port while the toaster warms up. */
+ busy_right_now = 0;
+ ...
+ busy_right_now = 1;
+ if (must_reclaim_port) {
+ parport_claim_or_block (private->dev);
+ must_reclaim_port = 0;
+ }
+ ...
}
- ...
-}
SEE ALSO
+^^^^^^^^
parport_unregister_device, parport_claim
+
+
parport_unregister_device - finish using a port
--------------------------
+-----------------------------------------------
-SYNPOPSIS
+SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
+
+ #include <linux/parport.h>
-void parport_unregister_device (struct pardevice *dev);
+ void parport_unregister_device (struct pardevice *dev);
DESCRIPTION
+^^^^^^^^^^^
This function is the opposite of parport_register_device. After using
-parport_unregister_device, 'dev' is no longer a valid device handle.
+parport_unregister_device, ``dev`` is no longer a valid device handle.
You should not unregister a device that is currently claimed, although
if you do it will be released automatically.
EXAMPLE
+^^^^^^^
+
+::
...
kfree (dev->private); /* before we lose the pointer */
@@ -467,460 +528,602 @@ EXAMPLE
...
SEE ALSO
+^^^^^^^^
+
parport_unregister_driver
parport_claim, parport_claim_or_block - claim the parallel port for a device
--------------------------------------
+----------------------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-int parport_claim (struct pardevice *dev);
-int parport_claim_or_block (struct pardevice *dev);
+ int parport_claim (struct pardevice *dev);
+ int parport_claim_or_block (struct pardevice *dev);
DESCRIPTION
+^^^^^^^^^^^
These functions attempt to gain control of the parallel port on which
-'dev' is registered. 'parport_claim' does not block, but
-'parport_claim_or_block' may do. (Put something here about blocking
+``dev`` is registered. ``parport_claim`` does not block, but
+``parport_claim_or_block`` may do. (Put something here about blocking
interruptibly or non-interruptibly.)
You should not try to claim a port that you have already claimed.
RETURN VALUE
+^^^^^^^^^^^^
A return value of zero indicates that the port was successfully
claimed, and the caller now has possession of the parallel port.
-If 'parport_claim_or_block' blocks before returning successfully, the
+If ``parport_claim_or_block`` blocks before returning successfully, the
return value is positive.
ERRORS
+^^^^^^
+========== ==========================================================
-EAGAIN The port is unavailable at the moment, but another attempt
to claim it may succeed.
+========== ==========================================================
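+
+EXAMPLE
+^^^^^^^
+
+A minimal sketch, assuming ``dev`` was returned by
+parport_register_device and ``do_transfer`` is a hypothetical helper::
+
+    int ret = parport_claim_or_block (dev);
+    if (ret < 0)
+            return ret;     /* could not claim the port */
+    do_transfer (dev->port);
+    parport_release (dev);
+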
SEE ALSO
+^^^^^^^^
+
parport_release
parport_release - release the parallel port
----------------
+-------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-void parport_release (struct pardevice *dev);
+ void parport_release (struct pardevice *dev);
DESCRIPTION
+^^^^^^^^^^^
Once a parallel port device has been claimed, it can be released using
-'parport_release'. It cannot fail, but you should not release a
+``parport_release``. It cannot fail, but you should not release a
device that you do not have possession of.
EXAMPLE
+^^^^^^^
-static size_t write (struct pardevice *dev, const void *buf,
- size_t len)
-{
- ...
- written = dev->port->ops->write_ecp_data (dev->port, buf,
- len);
- parport_release (dev);
- ...
-}
+::
+
+ static size_t write (struct pardevice *dev, const void *buf,
+ size_t len)
+ {
+ ...
+ written = dev->port->ops->write_ecp_data (dev->port, buf,
+ len);
+ parport_release (dev);
+ ...
+ }
SEE ALSO
+^^^^^^^^
change_mode, parport_claim, parport_claim_or_block, parport_yield
-
+
+
+
parport_yield, parport_yield_blocking - temporarily release a parallel port
--------------------------------------
+---------------------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-int parport_yield (struct pardevice *dev)
-int parport_yield_blocking (struct pardevice *dev);
+ int parport_yield (struct pardevice *dev)
+ int parport_yield_blocking (struct pardevice *dev);
DESCRIPTION
+^^^^^^^^^^^
When a driver has control of a parallel port, it may allow another
-driver to temporarily 'borrow' it. 'parport_yield' does not block;
-'parport_yield_blocking' may do.
+driver to temporarily "borrow" it. ``parport_yield`` does not block;
+``parport_yield_blocking`` may do.
RETURN VALUE
+^^^^^^^^^^^^
A return value of zero indicates that the caller still owns the port
and the call did not block.
-A positive return value from 'parport_yield_blocking' indicates that
+A positive return value from ``parport_yield_blocking`` indicates that
the caller still owns the port and the call blocked.
A return value of -EAGAIN indicates that the caller no longer owns the
port, and it must be re-claimed before use.
ERRORS
+^^^^^^
+========= ==========================================================
-EAGAIN Ownership of the parallel port was given away.
+========= ==========================================================
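+
+EXAMPLE
+^^^^^^^
+
+A sketch of yielding between buffers of a long transfer (the loop
+condition and ``send_one_buffer`` are hypothetical)::
+
+    while (more_buffers_to_send) {
+            send_one_buffer (dev->port);
+            if (parport_yield (dev) == -EAGAIN)
+                    /* Lost the port; wait to get it back. */
+                    parport_claim_or_block (dev);
+    }
+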
SEE ALSO
+^^^^^^^^
parport_release
+
+
parport_wait_peripheral - wait for status lines, up to 35ms
------------------------
+-----------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-int parport_wait_peripheral (struct parport *port,
- unsigned char mask,
- unsigned char val);
+ int parport_wait_peripheral (struct parport *port,
+ unsigned char mask,
+ unsigned char val);
DESCRIPTION
+^^^^^^^^^^^
-Wait for the status lines in mask to match the values in val.
+Wait for the status lines in ``mask`` to match the values in ``val``.
RETURN VALUE
+^^^^^^^^^^^^
+======== ==========================================================
-EINTR a signal is pending
0 the status lines in mask have values in val
1 timed out while waiting (35ms elapsed)
+======== ==========================================================
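+
+EXAMPLE
+^^^^^^^
+
+A sketch that waits up to 35ms for the peripheral to assert nAck,
+using the status bitmask constants from <linux/parport.h>::
+
+    if (parport_wait_peripheral (port, PARPORT_STATUS_ACK,
+                                 PARPORT_STATUS_ACK) == 1)
+            return -ETIMEDOUT;      /* peripheral did not respond */
+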
SEE ALSO
+^^^^^^^^
parport_poll_peripheral
+
+
parport_poll_peripheral - wait for status lines, in usec
------------------------
+--------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-int parport_poll_peripheral (struct parport *port,
- unsigned char mask,
- unsigned char val,
- int usec);
+ int parport_poll_peripheral (struct parport *port,
+ unsigned char mask,
+ unsigned char val,
+ int usec);
DESCRIPTION
+^^^^^^^^^^^
-Wait for the status lines in mask to match the values in val.
+Wait for the status lines in ``mask`` to match the values in ``val``.
RETURN VALUE
+^^^^^^^^^^^^
+======== ==========================================================
-EINTR a signal is pending
0 the status lines in mask have values in val
1 timed out while waiting (usec microseconds have elapsed)
+======== ==========================================================
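+
+EXAMPLE
+^^^^^^^
+
+A sketch like the one for parport_wait_peripheral, but busy-waiting
+for at most 100 microseconds for the BUSY status bit to read as set::
+
+    if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
+                                 PARPORT_STATUS_BUSY, 100) != 0)
+            return -EBUSY;  /* not ready; fall back to a slower path */
+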
SEE ALSO
+^^^^^^^^
parport_wait_peripheral
-
+
+
+
parport_wait_event - wait for an event on a port
-------------------
+------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-int parport_wait_event (struct parport *port, signed long timeout)
+ #include <linux/parport.h>
+
+ int parport_wait_event (struct parport *port, signed long timeout)
DESCRIPTION
+^^^^^^^^^^^
Wait for an event (e.g. interrupt) on a port. The timeout is in
jiffies.
RETURN VALUE
+^^^^^^^^^^^^
+======= ==========================================================
0 success
<0 error (exit as soon as possible)
>0 timed out
-
+======= ==========================================================
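+
+EXAMPLE
+^^^^^^^
+
+A sketch that sleeps for up to one second (HZ jiffies) waiting for an
+event on the port::
+
+    int ret = parport_wait_event (port, HZ);
+    if (ret < 0)
+            return ret;     /* error; exit as soon as possible */
+    if (ret > 0)
+            /* Timed out; poll the hardware instead. */
+            ...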
+
parport_negotiate - perform IEEE 1284 negotiation
------------------
+-------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-int parport_negotiate (struct parport *, int mode);
+ int parport_negotiate (struct parport *, int mode);
DESCRIPTION
+^^^^^^^^^^^
Perform IEEE 1284 negotiation.
RETURN VALUE
+^^^^^^^^^^^^
+======= ==========================================================
0 handshake OK; IEEE 1284 peripheral and mode available
-1 handshake failed; peripheral not compliant (or none present)
1 handshake OK; IEEE 1284 peripheral present but mode not
available
+======= ==========================================================
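+
+EXAMPLE
+^^^^^^^
+
+A sketch that negotiates the peripheral into nibble mode and then
+performs a reverse transfer (``buf`` is a caller-supplied buffer)::
+
+    if (parport_negotiate (port, IEEE1284_MODE_NIBBLE) != 0)
+            return -EIO;    /* no compliant peripheral, or mode refused */
+    got = parport_read (port, buf, sizeof (buf));
+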
SEE ALSO
+^^^^^^^^
parport_read, parport_write
-
+
+
+
parport_read - read data from device
-------------
+------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-ssize_t parport_read (struct parport *, void *buf, size_t len);
+ ssize_t parport_read (struct parport *, void *buf, size_t len);
DESCRIPTION
+^^^^^^^^^^^
Read data from device in current IEEE 1284 transfer mode. This only
works for modes that support reverse data transfer.
RETURN VALUE
+^^^^^^^^^^^^
If negative, an error code; otherwise the number of bytes transferred.
SEE ALSO
+^^^^^^^^
parport_write, parport_negotiate
-
+
+
+
parport_write - write data to device
--------------
+------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-ssize_t parport_write (struct parport *, const void *buf, size_t len);
+ ssize_t parport_write (struct parport *, const void *buf, size_t len);
DESCRIPTION
+^^^^^^^^^^^
Write data to device in current IEEE 1284 transfer mode. This only
works for modes that support forward data transfer.
RETURN VALUE
+^^^^^^^^^^^^
If negative, an error code; otherwise the number of bytes transferred.
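+
+EXAMPLE
+^^^^^^^
+
+A sketch of a forward transfer in whatever mode was last negotiated::
+
+    ssize_t wrote = parport_write (port, buf, len);
+    if (wrote < 0)
+            return wrote;   /* error code */
+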
SEE ALSO
+^^^^^^^^
parport_read, parport_negotiate
+
+
parport_open - register device for particular device number
-------------
+-----------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct pardevice *parport_open (int devnum, const char *name,
- int (*pf) (void *),
- void (*kf) (void *),
- void (*irqf) (int, void *,
- struct pt_regs *),
- int flags, void *handle);
+ #include <linux/parport.h>
+
+ struct pardevice *parport_open (int devnum, const char *name,
+ int (*pf) (void *),
+ void (*kf) (void *),
+ void (*irqf) (int, void *,
+ struct pt_regs *),
+ int flags, void *handle);
DESCRIPTION
+^^^^^^^^^^^
This is like parport_register_device but takes a device number instead
of a pointer to a struct parport.
RETURN VALUE
+^^^^^^^^^^^^
See parport_register_device. If no device is associated with devnum,
NULL is returned.
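+
+EXAMPLE
+^^^^^^^
+
+A sketch using a device number found by parport_find_class (the
+"camera" name is illustrative; the callbacks are omitted for brevity
+-- see parport_register_device)::
+
+    struct pardevice *dev = parport_open (devnum, "camera",
+                                          NULL, NULL, NULL, 0, NULL);
+    if (!dev)
+            return -ENODEV;
+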
SEE ALSO
+^^^^^^^^
parport_register_device
-
+
+
+
parport_close - unregister device for particular device number
--------------
+--------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-void parport_close (struct pardevice *dev);
+ void parport_close (struct pardevice *dev);
DESCRIPTION
+^^^^^^^^^^^
This is the equivalent of parport_unregister_device for parport_open.
SEE ALSO
+^^^^^^^^
parport_unregister_device, parport_open
-
+
+
+
parport_device_id - obtain IEEE 1284 Device ID
------------------
+----------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-ssize_t parport_device_id (int devnum, char *buffer, size_t len);
+ ssize_t parport_device_id (int devnum, char *buffer, size_t len);
DESCRIPTION
+^^^^^^^^^^^
Obtains the IEEE 1284 Device ID associated with a given device.
RETURN VALUE
+^^^^^^^^^^^^
If negative, an error code; otherwise, the number of bytes of buffer
that contain the device ID. The format of the device ID is as
-follows:
+follows::
-[length][ID]
+ [length][ID]
The first two bytes indicate the inclusive length of the entire Device
ID, and are in big-endian order. The ID is a sequence of pairs of the
-form:
+form::
-key:value;
+ key:value;
NOTES
+^^^^^
Many devices have ill-formed IEEE 1284 Device IDs.
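+
+EXAMPLE
+^^^^^^^
+
+A sketch that fetches a Device ID and prints it, skipping the two
+big-endian length bytes described above::
+
+    char id[128];
+    ssize_t len = parport_device_id (devnum, id, sizeof (id));
+    if (len > 2)
+            printk ("Device ID: %.*s\n", (int) (len - 2), id + 2);
+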
SEE ALSO
+^^^^^^^^
parport_find_class, parport_find_device
-
+
+
+
parport_device_coords - convert device number to device coordinates
-------------------
+-------------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-int parport_device_coords (int devnum, int *parport, int *mux,
- int *daisy);
+ int parport_device_coords (int devnum, int *parport, int *mux,
+ int *daisy);
DESCRIPTION
+^^^^^^^^^^^
Convert between device number (zero-based) and device coordinates
(port, multiplexor, daisy chain address).
RETURN VALUE
+^^^^^^^^^^^^
-Zero on success, in which case the coordinates are (*parport, *mux,
-*daisy).
+Zero on success, in which case the coordinates are (``*parport``, ``*mux``,
+``*daisy``).
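+
+EXAMPLE
+^^^^^^^
+
+A minimal sketch, for a ``devnum`` obtained from parport_find_class::
+
+    int parport, mux, daisy;
+    if (parport_device_coords (devnum, &parport, &mux, &daisy) == 0)
+            printk ("device %d is at (%d,%d,%d)\n",
+                    devnum, parport, mux, daisy);
+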
SEE ALSO
+^^^^^^^^
parport_open, parport_device_id
-
+
+
+
parport_find_class - find a device by its class
-------------------
+-----------------------------------------------
SYNOPSIS
-
-#include <linux/parport.h>
-
-typedef enum {
- PARPORT_CLASS_LEGACY = 0, /* Non-IEEE1284 device */
- PARPORT_CLASS_PRINTER,
- PARPORT_CLASS_MODEM,
- PARPORT_CLASS_NET,
- PARPORT_CLASS_HDC, /* Hard disk controller */
- PARPORT_CLASS_PCMCIA,
- PARPORT_CLASS_MEDIA, /* Multimedia device */
- PARPORT_CLASS_FDC, /* Floppy disk controller */
- PARPORT_CLASS_PORTS,
- PARPORT_CLASS_SCANNER,
- PARPORT_CLASS_DIGCAM,
- PARPORT_CLASS_OTHER, /* Anything else */
- PARPORT_CLASS_UNSPEC, /* No CLS field in ID */
- PARPORT_CLASS_SCSIADAPTER
-} parport_device_class;
-
-int parport_find_class (parport_device_class cls, int from);
+^^^^^^^^
+
+::
+
+ #include <linux/parport.h>
+
+ typedef enum {
+ PARPORT_CLASS_LEGACY = 0, /* Non-IEEE1284 device */
+ PARPORT_CLASS_PRINTER,
+ PARPORT_CLASS_MODEM,
+ PARPORT_CLASS_NET,
+ PARPORT_CLASS_HDC, /* Hard disk controller */
+ PARPORT_CLASS_PCMCIA,
+ PARPORT_CLASS_MEDIA, /* Multimedia device */
+ PARPORT_CLASS_FDC, /* Floppy disk controller */
+ PARPORT_CLASS_PORTS,
+ PARPORT_CLASS_SCANNER,
+ PARPORT_CLASS_DIGCAM,
+ PARPORT_CLASS_OTHER, /* Anything else */
+ PARPORT_CLASS_UNSPEC, /* No CLS field in ID */
+ PARPORT_CLASS_SCSIADAPTER
+ } parport_device_class;
+
+ int parport_find_class (parport_device_class cls, int from);
DESCRIPTION
+^^^^^^^^^^^
-Find a device by class. The search starts from device number from+1.
+Find a device by class. The search starts from device number ``from``+1.
RETURN VALUE
+^^^^^^^^^^^^
The device number of the next device in that class, or -1 if no such
device exists.
NOTES
+^^^^^
-Example usage:
+Example usage::
-int devnum = -1;
-while ((devnum = parport_find_class (PARPORT_CLASS_DIGCAM, devnum)) != -1) {
- struct pardevice *dev = parport_open (devnum, ...);
- ...
-}
+ int devnum = -1;
+ while ((devnum = parport_find_class (PARPORT_CLASS_DIGCAM, devnum)) != -1) {
+ struct pardevice *dev = parport_open (devnum, ...);
+ ...
+ }
SEE ALSO
+^^^^^^^^
parport_find_device, parport_open, parport_device_id
-
+
+
+
-parport_find_device - find a device by its class
-------------------
+parport_find_device - find a device by vendor and model
+--------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-int parport_find_device (const char *mfg, const char *mdl, int from);
+ #include <linux/parport.h>
+
+ int parport_find_device (const char *mfg, const char *mdl, int from);
DESCRIPTION
+^^^^^^^^^^^
-Find a device by vendor and model. The search starts from device
-number from+1.
+Find a device by vendor and model. The search starts from device
+number ``from``+1.
RETURN VALUE
+^^^^^^^^^^^^
The device number of the next device matching the specifications, or
-1 if no such device exists.
NOTES
+^^^^^
-Example usage:
+Example usage::
-int devnum = -1;
-while ((devnum = parport_find_device ("IOMEGA", "ZIP+", devnum)) != -1) {
- struct pardevice *dev = parport_open (devnum, ...);
- ...
-}
+ int devnum = -1;
+ while ((devnum = parport_find_device ("IOMEGA", "ZIP+", devnum)) != -1) {
+ struct pardevice *dev = parport_open (devnum, ...);
+ ...
+ }
SEE ALSO
+^^^^^^^^
parport_find_class, parport_open, parport_device_id
+
+
parport_set_timeout - set the inactivity timeout
--------------------
+------------------------------------------------
SYNOPSIS
+^^^^^^^^
+
+::
-#include <linux/parport.h>
+ #include <linux/parport.h>
-long parport_set_timeout (struct pardevice *dev, long inactivity);
+ long parport_set_timeout (struct pardevice *dev, long inactivity);
DESCRIPTION
+^^^^^^^^^^^
Set the inactivity timeout, in jiffies, for a registered device. The
previous timeout is returned.
RETURN VALUE
+^^^^^^^^^^^^
The previous timeout, in jiffies.
NOTES
+^^^^^
Some of the port->ops functions for a parport may take time, owing to
delays at the peripheral. After the peripheral has not responded for
-'inactivity' jiffies, a timeout will occur and the blocking function
+``inactivity`` jiffies, a timeout will occur and the blocking function
will return.
A timeout of 0 jiffies is a special case: the function must do as much
@@ -932,29 +1135,37 @@ Once set for a registered device, the timeout will remain at the set
value until set again.
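+
+EXAMPLE
+^^^^^^^
+
+A sketch that shortens the timeout around one operation and then
+restores the previous value::
+
+    long old = parport_set_timeout (dev, HZ / 10);
+    written = dev->port->ops->compat_write_data (dev->port, buf,
+                                                 len, 0);
+    parport_set_timeout (dev, old);
+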
SEE ALSO
+^^^^^^^^
port->ops->xxx_read/write_yyy
-
+
+
+
+
PORT FUNCTIONS
---------------
+==============
The functions in the port->ops structure (struct parport_operations)
are provided by the low-level driver responsible for that port.
port->ops->read_data - read the data register
---------------------
+---------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- unsigned char (*read_data) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ unsigned char (*read_data) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
If port->modes contains the PARPORT_MODE_TRISTATE flag and the
PARPORT_CONTROL_DIRECTION bit in the control register is set, this
@@ -964,45 +1175,59 @@ not set, the return value _may_ be the last value written to the data
register. Otherwise the return value is undefined.
SEE ALSO
+^^^^^^^^
write_data, read_status, write_control
+
+
port->ops->write_data - write the data register
----------------------
+-----------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- void (*write_data) (struct parport *port, unsigned char d);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ void (*write_data) (struct parport *port, unsigned char d);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Writes to the data register. May have side-effects (a STROBE pulse,
for instance).
SEE ALSO
+^^^^^^^^
read_data, read_status, write_control
+
+
port->ops->read_status - read the status register
-----------------------
+-------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- unsigned char (*read_status) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ unsigned char (*read_status) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Reads from the status register. This is a bitmask:
@@ -1015,76 +1240,98 @@ Reads from the status register. This is a bitmask:
There may be other bits set.
SEE ALSO
+^^^^^^^^
read_data, write_data, write_control
+
+
port->ops->read_control - read the control register
------------------------
+---------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- unsigned char (*read_control) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ unsigned char (*read_control) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Returns the last value written to the control register (either from
write_control or frob_control). No port access is performed.
SEE ALSO
+^^^^^^^^
read_data, write_data, read_status, write_control
+
+
port->ops->write_control - write the control register
-------------------------
+-----------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- void (*write_control) (struct parport *port, unsigned char s);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ void (*write_control) (struct parport *port, unsigned char s);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Writes to the control register. This is a bitmask:
- _______
-- PARPORT_CONTROL_STROBE (nStrobe)
- _______
-- PARPORT_CONTROL_AUTOFD (nAutoFd)
- _____
-- PARPORT_CONTROL_INIT (nInit)
- _________
-- PARPORT_CONTROL_SELECT (nSelectIn)
+Writes to the control register. This is a bitmask::
+
+ _______
+ - PARPORT_CONTROL_STROBE (nStrobe)
+ _______
+ - PARPORT_CONTROL_AUTOFD (nAutoFd)
+ _____
+ - PARPORT_CONTROL_INIT (nInit)
+ _________
+ - PARPORT_CONTROL_SELECT (nSelectIn)
SEE ALSO
+^^^^^^^^
read_data, write_data, read_status, frob_control
+
+
port->ops->frob_control - write control register bits
------------------------
+-----------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- unsigned char (*frob_control) (struct parport *port,
- unsigned char mask,
- unsigned char val);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ unsigned char (*frob_control) (struct parport *port,
+ unsigned char mask,
+ unsigned char val);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
This is equivalent to reading from the control register, masking out
the bits in mask, exclusive-or'ing with the bits in val, and writing
@@ -1095,23 +1342,30 @@ of its contents is maintained, so frob_control is in fact only one
port access.
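+
+For instance, a strobe pulse could be generated like this (a sketch;
+real drivers must respect the timing requirements of the protocol in
+use)::
+
+    port->ops->frob_control (port, PARPORT_CONTROL_STROBE,
+                             PARPORT_CONTROL_STROBE);   /* set nStrobe */
+    udelay (1);
+    port->ops->frob_control (port, PARPORT_CONTROL_STROBE, 0); /* clear */
+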
SEE ALSO
+^^^^^^^^
read_data, write_data, read_status, write_control
+
+
port->ops->enable_irq - enable interrupt generation
----------------------
+---------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- void (*enable_irq) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ void (*enable_irq) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
The parallel port hardware is instructed to generate interrupts at
appropriate moments, although those moments are
@@ -1119,353 +1373,460 @@ architecture-specific. For the PC architecture, interrupts are
commonly generated on the rising edge of nAck.
SEE ALSO
+^^^^^^^^
disable_irq
+
+
port->ops->disable_irq - disable interrupt generation
-----------------------
+-----------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- void (*disable_irq) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ void (*disable_irq) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
The parallel port hardware is instructed not to generate interrupts.
The interrupt itself is not masked.
SEE ALSO
+^^^^^^^^
enable_irq
+
+
port->ops->data_forward - enable data drivers
------------------------
+---------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- void (*data_forward) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ void (*data_forward) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Enables the data line drivers, for 8-bit host-to-peripheral
communications.
SEE ALSO
+^^^^^^^^
data_reverse
+
+
port->ops->data_reverse - tristate the buffer
------------------------
+---------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- void (*data_reverse) (struct parport *port);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ void (*data_reverse) (struct parport *port);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Places the data bus in a high impedance state, if port->modes has the
PARPORT_MODE_TRISTATE bit set.
SEE ALSO
+^^^^^^^^
data_forward
-
+
+
+
port->ops->epp_write_data - write EPP data
--------------------------
+------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*epp_write_data) (struct parport *port, const void *buf,
- size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*epp_write_data) (struct parport *port, const void *buf,
+ size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Writes data in EPP mode, and returns the number of bytes written.
-The 'flags' parameter may be one or more of the following,
+The ``flags`` parameter may be one or more of the following,
bitwise-or'ed together:
+======================= =================================================
PARPORT_EPP_FAST Use fast transfers. Some chips provide 16-bit and
32-bit registers. However, if a transfer
times out, the return value may be unreliable.
+======================= =================================================
SEE ALSO
+^^^^^^^^
epp_read_data, epp_write_addr, epp_read_addr
+
+
port->ops->epp_read_data - read EPP data
-------------------------
+----------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*epp_read_data) (struct parport *port, void *buf,
- size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*epp_read_data) (struct parport *port, void *buf,
+ size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Reads data in EPP mode, and returns the number of bytes read.
-The 'flags' parameter may be one or more of the following,
+The ``flags`` parameter may be one or more of the following,
bitwise-or'ed together:
+======================= =================================================
PARPORT_EPP_FAST Use fast transfers. Some chips provide 16-bit and
32-bit registers. However, if a transfer
times out, the return value may be unreliable.
+======================= =================================================
SEE ALSO
+^^^^^^^^
epp_write_data, epp_write_addr, epp_read_addr
-
+
+
+
port->ops->epp_write_addr - write EPP address
--------------------------
+---------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*epp_write_addr) (struct parport *port,
- const void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*epp_write_addr) (struct parport *port,
+ const void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Writes EPP addresses (8 bits each), and returns the number written.
-The 'flags' parameter may be one or more of the following,
+The ``flags`` parameter may be one or more of the following,
bitwise-or'ed together:
+======================= =================================================
PARPORT_EPP_FAST Use fast transfers. Some chips provide 16-bit and
32-bit registers. However, if a transfer
times out, the return value may be unreliable.
+======================= =================================================
(Does PARPORT_EPP_FAST make sense for this function?)
SEE ALSO
+^^^^^^^^
epp_write_data, epp_read_data, epp_read_addr
+
+
port->ops->epp_read_addr - read EPP address
-------------------------
+-------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*epp_read_addr) (struct parport *port, void *buf,
- size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*epp_read_addr) (struct parport *port, void *buf,
+ size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
Reads EPP addresses (8 bits each), and returns the number read.
-The 'flags' parameter may be one or more of the following,
+The ``flags`` parameter may be one or more of the following,
bitwise-or'ed together:
+======================= =================================================
PARPORT_EPP_FAST Use fast transfers. Some chips provide 16-bit and
32-bit registers. However, if a transfer
times out, the return value may be unreliable.
+======================= =================================================
(Does PARPORT_EPP_FAST make sense for this function?)
SEE ALSO
+^^^^^^^^
epp_write_data, epp_read_data, epp_write_addr
+
+
port->ops->ecp_write_data - write a block of ECP data
--------------------------
+-----------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*ecp_write_data) (struct parport *port,
- const void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*ecp_write_data) (struct parport *port,
+ const void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Writes a block of ECP data. The 'flags' parameter is ignored.
+Writes a block of ECP data. The ``flags`` parameter is ignored.
RETURN VALUE
+^^^^^^^^^^^^
The number of bytes written.
SEE ALSO
+^^^^^^^^
ecp_read_data, ecp_write_addr
+
+
port->ops->ecp_read_data - read a block of ECP data
-------------------------
+---------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*ecp_read_data) (struct parport *port,
- void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*ecp_read_data) (struct parport *port,
+ void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Reads a block of ECP data. The 'flags' parameter is ignored.
+Reads a block of ECP data. The ``flags`` parameter is ignored.
RETURN VALUE
+^^^^^^^^^^^^
The number of bytes read. NB. There may be more unread data in a
FIFO. Is there a way of stunning the FIFO to prevent this?
SEE ALSO
+^^^^^^^^
-ecp_write_block, ecp_write_addr
+ecp_write_data, ecp_write_addr
-
+
+
+
port->ops->ecp_write_addr - write a block of ECP addresses
--------------------------
+----------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*ecp_write_addr) (struct parport *port,
- const void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*ecp_write_addr) (struct parport *port,
+ const void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Writes a block of ECP addresses. The 'flags' parameter is ignored.
+Writes a block of ECP addresses. The ``flags`` parameter is ignored.
RETURN VALUE
+^^^^^^^^^^^^
The number of bytes written.
NOTES
+^^^^^
This may use a FIFO, and if so shall not return until the FIFO is empty.
SEE ALSO
+^^^^^^^^
ecp_read_data, ecp_write_data
-
+
+
+
port->ops->nibble_read_data - read a block of data in nibble mode
----------------------------
+-----------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*nibble_read_data) (struct parport *port,
- void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*nibble_read_data) (struct parport *port,
+ void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Reads a block of data in nibble mode. The 'flags' parameter is ignored.
+Reads a block of data in nibble mode. The ``flags`` parameter is ignored.
RETURN VALUE
+^^^^^^^^^^^^
The number of whole bytes read.
SEE ALSO
+^^^^^^^^
byte_read_data, compat_write_data
+
+
port->ops->byte_read_data - read a block of data in byte mode
--------------------------
+-------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*byte_read_data) (struct parport *port,
- void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*byte_read_data) (struct parport *port,
+ void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Reads a block of data in byte mode. The 'flags' parameter is ignored.
+Reads a block of data in byte mode. The ``flags`` parameter is ignored.
RETURN VALUE
+^^^^^^^^^^^^
The number of bytes read.
SEE ALSO
+^^^^^^^^
nibble_read_data, compat_write_data
+
+
port->ops->compat_write_data - write a block of data in compatibility mode
-----------------------------
+--------------------------------------------------------------------------
SYNOPSIS
+^^^^^^^^
-#include <linux/parport.h>
+::
-struct parport_operations {
- ...
- size_t (*compat_write_data) (struct parport *port,
- const void *buf, size_t len, int flags);
- ...
-};
+ #include <linux/parport.h>
+
+ struct parport_operations {
+ ...
+ size_t (*compat_write_data) (struct parport *port,
+ const void *buf, size_t len, int flags);
+ ...
+ };
DESCRIPTION
+^^^^^^^^^^^
-Writes a block of data in compatibility mode. The 'flags' parameter
+Writes a block of data in compatibility mode. The ``flags`` parameter
is ignored.
RETURN VALUE
+^^^^^^^^^^^^
The number of bytes written.
SEE ALSO
+^^^^^^^^
nibble_read_data, byte_read_data
diff --git a/Documentation/percpu-rw-semaphore.txt b/Documentation/percpu-rw-semaphore.txt
index 7d3c82431909..247de6410855 100644
--- a/Documentation/percpu-rw-semaphore.txt
+++ b/Documentation/percpu-rw-semaphore.txt
@@ -1,5 +1,6 @@
+====================
Percpu rw semaphores
---------------------
+====================
-Percpu rw semaphores is a new read-write semaphore design that is
-optimized for locking for reading.
+The percpu rw semaphore is a new read-write semaphore design that is
+optimized for locking for reading.
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index 383cdd863f08..457c3e0f86d6 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -1,10 +1,14 @@
- PHY SUBSYSTEM
- Kishon Vijay Abraham I <kishon@ti.com>
+=============
+PHY subsystem
+=============
+
+:Author: Kishon Vijay Abraham I <kishon@ti.com>
This document explains the Generic PHY Framework along with the APIs provided,
and how-to-use.
-1. Introduction
+Introduction
+============
*PHY* is the abbreviation for physical layer. It is used to connect a device
to the physical medium e.g., the USB controller has a PHY to provide functions
@@ -21,7 +25,8 @@ better code maintainability.
This framework will be of use only to devices that use external PHY (PHY
functionality is not embedded within the controller).
-2. Registering/Unregistering the PHY provider
+Registering/Unregistering the PHY provider
+==========================================
PHY provider refers to an entity that implements one or more PHY instances.
For the simple case where the PHY provider implements only a single instance of
@@ -30,11 +35,14 @@ of_phy_simple_xlate. If the PHY provider implements multiple instances, it
should provide its own implementation of of_xlate. of_xlate is used only for
dt boot case.
-#define of_phy_provider_register(dev, xlate) \
- __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
+::
+
+ #define of_phy_provider_register(dev, xlate) \
+ __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
-#define devm_of_phy_provider_register(dev, xlate) \
- __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
+ #define devm_of_phy_provider_register(dev, xlate) \
+ __devm_of_phy_provider_register((dev), NULL, THIS_MODULE,
+ (xlate))
of_phy_provider_register and devm_of_phy_provider_register macros can be used to
register the phy_provider and it takes device and of_xlate as
@@ -47,28 +55,35 @@ nodes within extra levels for context and extensibility, in which case the low
level of_phy_provider_register_full() and devm_of_phy_provider_register_full()
macros can be used to override the node containing the children.
-#define of_phy_provider_register_full(dev, children, xlate) \
- __of_phy_provider_register(dev, children, THIS_MODULE, xlate)
+::
+
+ #define of_phy_provider_register_full(dev, children, xlate) \
+ __of_phy_provider_register(dev, children, THIS_MODULE, xlate)
-#define devm_of_phy_provider_register_full(dev, children, xlate) \
- __devm_of_phy_provider_register_full(dev, children, THIS_MODULE, xlate)
+ #define devm_of_phy_provider_register_full(dev, children, xlate) \
+ __devm_of_phy_provider_register_full(dev, children,
+ THIS_MODULE, xlate)
-void devm_of_phy_provider_unregister(struct device *dev,
- struct phy_provider *phy_provider);
-void of_phy_provider_unregister(struct phy_provider *phy_provider);
+ void devm_of_phy_provider_unregister(struct device *dev,
+ struct phy_provider *phy_provider);
+ void of_phy_provider_unregister(struct phy_provider *phy_provider);
devm_of_phy_provider_unregister and of_phy_provider_unregister can be used to
unregister the PHY.
-3. Creating the PHY
+Creating the PHY
+================
The PHY driver should create the PHY in order for other peripheral controllers
to make use of it. The PHY framework provides 2 APIs to create the PHY.
-struct phy *phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops);
-struct phy *devm_phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops);
+::
+
+ struct phy *phy_create(struct device *dev, struct device_node *node,
+ const struct phy_ops *ops);
+ struct phy *devm_phy_create(struct device *dev,
+ struct device_node *node,
+ const struct phy_ops *ops);
The PHY drivers can use one of the above 2 APIs to create the PHY by passing
the device pointer and phy ops.
@@ -84,12 +99,16 @@ phy_ops to get back the private data.
Before the controller can make use of the PHY, it has to get a reference to
it. This framework provides the following APIs to get a reference to the PHY.
-struct phy *phy_get(struct device *dev, const char *string);
-struct phy *phy_optional_get(struct device *dev, const char *string);
-struct phy *devm_phy_get(struct device *dev, const char *string);
-struct phy *devm_phy_optional_get(struct device *dev, const char *string);
-struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
- int index);
+::
+
+ struct phy *phy_get(struct device *dev, const char *string);
+ struct phy *phy_optional_get(struct device *dev, const char *string);
+ struct phy *devm_phy_get(struct device *dev, const char *string);
+ struct phy *devm_phy_optional_get(struct device *dev,
+ const char *string);
+ struct phy *devm_of_phy_get_by_index(struct device *dev,
+ struct device_node *np,
+ int index);
phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can
be used to get the PHY. In the case of dt boot, the string arguments
@@ -111,30 +130,35 @@ the phy_init() and phy_exit() calls, and phy_power_on() and
phy_power_off() calls are all NOP when applied to a NULL phy. The NULL
phy is useful in devices for handling optional phy devices.
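+
+A typical consumer sequence might therefore look like this (a sketch;
+the "usb-phy" string is illustrative and error handling is
+abbreviated)::
+
+	phy = devm_phy_get(dev, "usb-phy");
+	if (IS_ERR(phy))
+		return PTR_ERR(phy);
+	phy_init(phy);
+	phy_power_on(phy);
+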
-5. Releasing a reference to the PHY
+Releasing a reference to the PHY
+================================
When the controller no longer needs the PHY, it has to release the reference
to the PHY it has obtained using the APIs mentioned in the above section. The
PHY framework provides 2 APIs to release a reference to the PHY.
-void phy_put(struct phy *phy);
-void devm_phy_put(struct device *dev, struct phy *phy);
+::
+
+ void phy_put(struct phy *phy);
+ void devm_phy_put(struct device *dev, struct phy *phy);
Both these APIs are used to release a reference to the PHY and devm_phy_put
destroys the devres associated with this PHY.
-6. Destroying the PHY
+Destroying the PHY
+==================
When the driver that created the PHY is unloaded, it should destroy the PHY it
-created using one of the following 2 APIs.
+created using one of the following 2 APIs::
-void phy_destroy(struct phy *phy);
-void devm_phy_destroy(struct device *dev, struct phy *phy);
+ void phy_destroy(struct phy *phy);
+ void devm_phy_destroy(struct device *dev, struct phy *phy);
Both these APIs destroy the PHY and devm_phy_destroy destroys the devres
associated with this PHY.
-7. PM Runtime
+PM Runtime
+==========
This subsystem is pm runtime enabled. So while creating the PHY,
pm_runtime_enable of the phy device created by this subsystem is called and
@@ -150,7 +174,8 @@ There are exported APIs like phy_pm_runtime_get, phy_pm_runtime_get_sync,
phy_pm_runtime_put, phy_pm_runtime_put_sync, phy_pm_runtime_allow and
phy_pm_runtime_forbid for performing PM operations.
-8. PHY Mappings
+PHY Mappings
+============
In order to get reference to a PHY without help from DeviceTree, the framework
offers lookups which can be compared to clkdev that allow clk structures to be
@@ -158,12 +183,15 @@ bound to devices. A lookup can be made during runtime when a handle to
the struct phy already exists.
The framework offers the following API for registering and unregistering the
-lookups.
+lookups::
-int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id);
-void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id);
+ int phy_create_lookup(struct phy *phy, const char *con_id,
+ const char *dev_id);
+ void phy_remove_lookup(struct phy *phy, const char *con_id,
+ const char *dev_id);
-9. DeviceTree Binding
+DeviceTree Binding
+==================
The documentation for PHY dt binding can be found @
Documentation/devicetree/bindings/phy/phy-bindings.txt
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
index 9a5bc8651c29..aafddbee7377 100644
--- a/Documentation/pi-futex.txt
+++ b/Documentation/pi-futex.txt
@@ -1,5 +1,6 @@
+======================
Lightweight PI-futexes
-----------------------
+======================
We are calling them lightweight for 3 reasons:
@@ -25,8 +26,8 @@ determinism and well-bound latencies. Even in the worst-case, PI will
improve the statistical distribution of locking related application
delays.
-The longer reply:
------------------
+The longer reply
+----------------
Firstly, sharing locks between multiple tasks is a common programming
technique that often cannot be replaced with lockless algorithms. As we
@@ -71,8 +72,8 @@ deterministic execution of the high-prio task: any medium-priority task
could preempt the low-prio task while it holds the shared lock and
executes the critical section, and could delay it indefinitely.
-Implementation:
----------------
+Implementation
+--------------
As mentioned before, the userspace fastpath of PI-enabled pthread
mutexes involves no kernel work at all - they behave quite similarly to
@@ -83,8 +84,8 @@ entering the kernel.
To handle the slowpath, we have added two new futex ops:
- FUTEX_LOCK_PI
- FUTEX_UNLOCK_PI
+ - FUTEX_LOCK_PI
+ - FUTEX_UNLOCK_PI
If the lock-acquire fastpath fails, [i.e. an atomic transition from 0 to
TID fails], then FUTEX_LOCK_PI is called. The kernel does all the
diff --git a/Documentation/pnp.txt b/Documentation/pnp.txt
index 763e4659bf18..bab2d10631f0 100644
--- a/Documentation/pnp.txt
+++ b/Documentation/pnp.txt
@@ -1,98 +1,118 @@
+=================================
Linux Plug and Play Documentation
-by Adam Belay <ambx1@neo.rr.com>
-last updated: Oct. 16, 2002
----------------------------------------------------------------------------------------
+=================================
+:Author: Adam Belay <ambx1@neo.rr.com>
+:Last updated: Oct. 16, 2002
Overview
--------
- Plug and Play provides a means of detecting and setting resources for legacy or
+
+Plug and Play provides a means of detecting and setting resources for legacy or
otherwise unconfigurable devices. The Linux Plug and Play Layer provides these
services to compatible drivers.
-
The User Interface
------------------
- The Linux Plug and Play user interface provides a means to activate PnP devices
+
+The Linux Plug and Play user interface provides a means to activate PnP devices
for legacy and user level drivers that do not support Linux Plug and Play. The
user interface is integrated into sysfs.
In addition to the standard sysfs file the following are created in each
device's directory:
-id - displays a list of support EISA IDs
-options - displays possible resource configurations
-resources - displays currently allocated resources and allows resource changes
+- id - displays a list of supported EISA IDs
+- options - displays possible resource configurations
+- resources - displays currently allocated resources and allows resource changes
--activating a device
+activating a device
+^^^^^^^^^^^^^^^^^^^
-#echo "auto" > resources
+::
+
+ # echo "auto" > resources
this will invoke the automatic resource config system to activate the device
--manually activating a device
+manually activating a device
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ # echo "manual <depnum> <mode>" > resources
-#echo "manual <depnum> <mode>" > resources
-<depnum> - the configuration number
-<mode> - static or dynamic
- static = for next boot
- dynamic = now
+ <depnum> - the configuration number
+ <mode> - static or dynamic
+ static = for next boot
+ dynamic = now
--disabling a device
+disabling a device
+^^^^^^^^^^^^^^^^^^
-#echo "disable" > resources
+::
+
+ # echo "disable" > resources
EXAMPLE:
Suppose you need to activate the floppy disk controller.
-1.) change to the proper directory, in my case it is
-/driver/bus/pnp/devices/00:0f
-# cd /driver/bus/pnp/devices/00:0f
-# cat name
-PC standard floppy disk controller
-
-2.) check if the device is already active
-# cat resources
-DISABLED
-
-- Notice the string "DISABLED". This means the device is not active.
-
-3.) check the device's possible configurations (optional)
-# cat options
-Dependent: 01 - Priority acceptable
- port 0x3f0-0x3f0, align 0x7, size 0x6, 16-bit address decoding
- port 0x3f7-0x3f7, align 0x0, size 0x1, 16-bit address decoding
- irq 6
- dma 2 8-bit compatible
-Dependent: 02 - Priority acceptable
- port 0x370-0x370, align 0x7, size 0x6, 16-bit address decoding
- port 0x377-0x377, align 0x0, size 0x1, 16-bit address decoding
- irq 6
- dma 2 8-bit compatible
-
-4.) now activate the device
-# echo "auto" > resources
-
-5.) finally check if the device is active
-# cat resources
-io 0x3f0-0x3f5
-io 0x3f7-0x3f7
-irq 6
-dma 2
-
-also there are a series of kernel parameters:
-pnp_reserve_irq=irq1[,irq2] ....
-pnp_reserve_dma=dma1[,dma2] ....
-pnp_reserve_io=io1,size1[,io2,size2] ....
-pnp_reserve_mem=mem1,size1[,mem2,size2] ....
+
+1. change to the proper directory, in my case it is
+ /driver/bus/pnp/devices/00:0f::
+
+ # cd /driver/bus/pnp/devices/00:0f
+ # cat name
+ PC standard floppy disk controller
+
+2. check if the device is already active::
+
+ # cat resources
+ DISABLED
+
+ - Notice the string "DISABLED". This means the device is not active.
+
+3. check the device's possible configurations (optional)::
+
+ # cat options
+ Dependent: 01 - Priority acceptable
+ port 0x3f0-0x3f0, align 0x7, size 0x6, 16-bit address decoding
+ port 0x3f7-0x3f7, align 0x0, size 0x1, 16-bit address decoding
+ irq 6
+ dma 2 8-bit compatible
+ Dependent: 02 - Priority acceptable
+ port 0x370-0x370, align 0x7, size 0x6, 16-bit address decoding
+ port 0x377-0x377, align 0x0, size 0x1, 16-bit address decoding
+ irq 6
+ dma 2 8-bit compatible
+
+4. now activate the device::
+
+ # echo "auto" > resources
+
+5. finally check if the device is active::
+
+ # cat resources
+ io 0x3f0-0x3f5
+ io 0x3f7-0x3f7
+ irq 6
+ dma 2
+
+There is also a series of kernel parameters::
+
+ pnp_reserve_irq=irq1[,irq2] ....
+ pnp_reserve_dma=dma1[,dma2] ....
+ pnp_reserve_io=io1,size1[,io2,size2] ....
+ pnp_reserve_mem=mem1,size1[,mem2,size2] ....
The Unified Plug and Play Layer
-------------------------------
- All Plug and Play drivers, protocols, and services meet at a central location
+
+All Plug and Play drivers, protocols, and services meet at a central location
called the Plug and Play Layer. This layer is responsible for the exchange of
information between PnP drivers and PnP protocols. Thus it automatically
forwards commands to the proper protocol. This makes writing PnP drivers
@@ -101,64 +121,73 @@ significantly easier.
The following functions are available from the Plug and Play Layer:
pnp_get_protocol
-- increments the number of uses by one
+ increments the number of uses by one
pnp_put_protocol
-- deincrements the number of uses by one
+ decrements the number of uses by one
pnp_register_protocol
-- use this to register a new PnP protocol
+ use this to register a new PnP protocol
pnp_unregister_protocol
-- use this function to remove a PnP protocol from the Plug and Play Layer
+ use this function to remove a PnP protocol from the Plug and Play Layer
pnp_register_driver
-- adds a PnP driver to the Plug and Play Layer
-- this includes driver model integration
-- returns zero for success or a negative error number for failure; count
+ adds a PnP driver to the Plug and Play Layer (see the sketch below)
+
+ this includes driver model integration
+ returns zero for success or a negative error number for failure; count
calls to the .add() method if you need to know how many devices bind to
the driver
pnp_unregister_driver
-- removes a PnP driver from the Plug and Play Layer
+ removes a PnP driver from the Plug and Play Layer
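+
+A driver might therefore register itself like this (a sketch; the
+``serial_pnp_*`` names and the id table are illustrative -- see the
+driver example near the end of this document)::
+
+    static struct pnp_driver serial_pnp_driver = {
+            .name           = "serial",
+            .id_table       = pnp_dev_table,
+            .probe          = serial_pnp_probe,
+            .remove         = serial_pnp_remove,
+    };
+
+    ret = pnp_register_driver(&serial_pnp_driver);
+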
Plug and Play Protocols
-----------------------
- This section contains information for PnP protocol developers.
+
+This section contains information for PnP protocol developers.
The following Protocols are currently available in the computing world:
-- PNPBIOS: used for system devices such as serial and parallel ports.
-- ISAPNP: provides PnP support for the ISA bus
-- ACPI: among its many uses, ACPI provides information about system level
-devices.
+
+- PNPBIOS:
+ used for system devices such as serial and parallel ports.
+- ISAPNP:
+ provides PnP support for the ISA bus
+- ACPI:
+ among its many uses, ACPI provides information about system level
+ devices.
+
ACPI is meant to replace the PNPBIOS. It is not currently supported by Linux
Plug and Play but it is planned to be in the near future.
Requirements for a Linux PnP protocol:
-1.) the protocol must use EISA IDs
-2.) the protocol must inform the PnP Layer of a device's current configuration
+1. the protocol must use EISA IDs
+2. the protocol must inform the PnP Layer of a device's current configuration
+
- the ability to set resources is optional but preferred.
The following are PnP protocol related functions:
pnp_add_device
-- use this function to add a PnP device to the PnP layer
-- only call this function when all wanted values are set in the pnp_dev
-structure
+ use this function to add a PnP device to the PnP layer
+
+ only call this function when all wanted values are set in the pnp_dev
+ structure
pnp_init_device
-- call this to initialize the PnP structure
+ call this to initialize the PnP structure
pnp_remove_device
-- call this to remove a device from the Plug and Play Layer.
-- it will fail if the device is still in use.
-- automatically will free mem used by the device and related structures
+ call this to remove a device from the Plug and Play Layer.
+ it will fail if the device is still in use.
+ it automatically frees the memory used by the device and related structures
pnp_add_id
-- adds an EISA ID to the list of supported IDs for the specified device
+ adds an EISA ID to the list of supported IDs for the specified device
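+
+As an illustrative sketch (modelled loosely on drivers/pnp/pnpbios/core.c;
+the handler names are placeholders), a protocol is declared and registered
+like this::
+
+    static struct pnp_protocol my_protocol = {
+        .name    = "My PnP protocol",
+        .get     = my_get_resources,
+        .set     = my_set_resources,
+        .disable = my_disable_resources,
+    };
+
+    static int __init my_protocol_init(void)
+    {
+        return pnp_register_protocol(&my_protocol);
+    }
+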
For more information consult the source of a protocol such as
/drivers/pnp/pnpbios/core.c.
@@ -167,85 +196,97 @@ For more information consult the source of a protocol such as
Linux Plug and Play Drivers
---------------------------
- This section contains information for Linux PnP driver developers.
+
+This section contains information for Linux PnP driver developers.
The New Way
-...........
-1.) first make a list of supported EISA IDS
-ex:
-static const struct pnp_id pnp_dev_table[] = {
- /* Standard LPT Printer Port */
- {.id = "PNP0400", .driver_data = 0},
- /* ECP Printer Port */
- {.id = "PNP0401", .driver_data = 0},
- {.id = ""}
-};
-
-Please note that the character 'X' can be used as a wild card in the function
-portion (last four characters).
-ex:
+^^^^^^^^^^^
+
+1. first make a list of supported EISA IDS
+
+ ex::
+
+ static const struct pnp_id pnp_dev_table[] = {
+ /* Standard LPT Printer Port */
+ {.id = "PNP0400", .driver_data = 0},
+ /* ECP Printer Port */
+ {.id = "PNP0401", .driver_data = 0},
+ {.id = ""}
+ };
+
+ Please note that the character 'X' can be used as a wild card in the function
+ portion (last four characters).
+
+ ex::
+
/* Unknown PnP modems */
{ "PNPCXXX", UNKNOWN_DEV },
-Supported PnP card IDs can optionally be defined.
-ex:
-static const struct pnp_id pnp_card_table[] = {
- { "ANYDEVS", 0 },
- { "", 0 }
-};
-
-2.) Optionally define probe and remove functions. It may make sense not to
-define these functions if the driver already has a reliable method of detecting
-the resources, such as the parport_pc driver.
-ex:
-static int
-serial_pnp_probe(struct pnp_dev * dev, const struct pnp_id *card_id, const
- struct pnp_id *dev_id)
-{
-. . .
-
-ex:
-static void serial_pnp_remove(struct pnp_dev * dev)
-{
-. . .
-
-consult /drivers/serial/8250_pnp.c for more information.
-
-3.) create a driver structure
-ex:
-
-static struct pnp_driver serial_pnp_driver = {
- .name = "serial",
- .card_id_table = pnp_card_table,
- .id_table = pnp_dev_table,
- .probe = serial_pnp_probe,
- .remove = serial_pnp_remove,
-};
-
-* name and id_table cannot be NULL.
-
-4.) register the driver
-ex:
-
-static int __init serial8250_pnp_init(void)
-{
- return pnp_register_driver(&serial_pnp_driver);
-}
+ Supported PnP card IDs can optionally be defined.
+ ex::
+
+ static const struct pnp_id pnp_card_table[] = {
+ { "ANYDEVS", 0 },
+ { "", 0 }
+ };
+
+2. Optionally define probe and remove functions. It may make sense not to
+ define these functions if the driver already has a reliable method of detecting
+ the resources, such as the parport_pc driver.
+
+ ex::
+
+ static int
+ serial_pnp_probe(struct pnp_dev * dev, const struct pnp_id *card_id, const
+ struct pnp_id *dev_id)
+ {
+ . . .
+
+ ex::
+
+ static void serial_pnp_remove(struct pnp_dev * dev)
+ {
+ . . .
+
+ consult /drivers/serial/8250_pnp.c for more information.
+
+3. create a driver structure
+
+ ex::
+
+ static struct pnp_driver serial_pnp_driver = {
+ .name = "serial",
+ .card_id_table = pnp_card_table,
+ .id_table = pnp_dev_table,
+ .probe = serial_pnp_probe,
+ .remove = serial_pnp_remove,
+ };
+
+ * name and id_table cannot be NULL.
+
+4. register the driver
+
+ ex::
+
+ static int __init serial8250_pnp_init(void)
+ {
+ return pnp_register_driver(&serial_pnp_driver);
+ }
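+
+   The matching module exit path would normally unregister the driver
+   again; a minimal sketch reusing the names from the example above::
+
+       static void __exit serial8250_pnp_exit(void)
+       {
+               pnp_unregister_driver(&serial_pnp_driver);
+       }
+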
The Old Way
-...........
+^^^^^^^^^^^
A series of compatibility functions has been created to make it easy to convert
ISAPNP drivers. They should serve as a temporary solution only.
-They are as follows:
+They are as follows::
-struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from)
+ struct pnp_card *pnp_find_card(unsigned short vendor,
+ unsigned short device,
+ struct pnp_card *from)
-struct pnp_dev *pnp_find_dev(struct pnp_card *card,
- unsigned short vendor,
- unsigned short function,
- struct pnp_dev *from)
+ struct pnp_dev *pnp_find_dev(struct pnp_card *card,
+ unsigned short vendor,
+ unsigned short function,
+ struct pnp_dev *from)
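+
+A hedged usage sketch (the vendor/device/function IDs below are made up
+for illustration)::
+
+    struct pnp_card *card;
+    struct pnp_dev *dev;
+
+    card = pnp_find_card(ISAPNP_VENDOR('A', 'B', 'C'),
+                         ISAPNP_DEVICE(0x1234), NULL);
+    if (card)
+        dev = pnp_find_dev(card, ISAPNP_VENDOR('A', 'B', 'C'),
+                           ISAPNP_FUNCTION(0x0001), NULL);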
diff --git a/Documentation/preempt-locking.txt b/Documentation/preempt-locking.txt
index e89ce6624af2..c945062be66c 100644
--- a/Documentation/preempt-locking.txt
+++ b/Documentation/preempt-locking.txt
@@ -1,10 +1,13 @@
- Proper Locking Under a Preemptible Kernel:
- Keeping Kernel Code Preempt-Safe
- Robert Love <rml@tech9.net>
- Last Updated: 28 Aug 2002
+===========================================================================
+Proper Locking Under a Preemptible Kernel: Keeping Kernel Code Preempt-Safe
+===========================================================================
+:Author: Robert Love <rml@tech9.net>
+:Last Updated: 28 Aug 2002
-INTRODUCTION
+
+Introduction
+============
A preemptible kernel creates new locking issues. The issues are the same as
@@ -17,9 +20,10 @@ requires protecting these situations.
RULE #1: Per-CPU data structures need explicit protection
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Two similar problems arise. An example code snippet:
+Two similar problems arise. An example code snippet::
struct this_needs_locking tux[NR_CPUS];
tux[smp_processor_id()] = some_value;
@@ -35,6 +39,7 @@ You can also use put_cpu() and get_cpu(), which will disable preemption.
RULE #2: CPU state must be protected.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Under preemption, the state of the CPU must be protected. This is arch-
@@ -52,6 +57,7 @@ However, fpu__restore() must be called with preemption disabled.
RULE #3: Lock acquire and release must be performed by same task
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A lock acquired in one task must be released by the same task. This
@@ -61,17 +67,20 @@ like this, acquire and release the task in the same code path and
have the caller wait on an event by the other task.
-SOLUTION
+Solution
+========
Data protection under preemption is achieved by disabling preemption for the
duration of the critical region.
-preempt_enable() decrement the preempt counter
-preempt_disable() increment the preempt counter
-preempt_enable_no_resched() decrement, but do not immediately preempt
-preempt_check_resched() if needed, reschedule
-preempt_count() return the preempt counter
+::
+
+ preempt_enable() decrement the preempt counter
+ preempt_disable() increment the preempt counter
+ preempt_enable_no_resched() decrement, but do not immediately preempt
+ preempt_check_resched() if needed, reschedule
+ preempt_count() return the preempt counter
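+
+The following sketch (illustrative only, not taken from the kernel
+sources) shows how the counter behaves when the calls nest::
+
+    preempt_disable();      /* count 0 -> 1 */
+    preempt_disable();      /* count 1 -> 2 */
+    /* ... access per-CPU data ... */
+    preempt_enable();       /* count 2 -> 1, still not preemptible */
+    preempt_enable();       /* count 1 -> 0, preemption allowed again */
+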
The functions are nestable. In other words, you can call preempt_disable
n-times in a code path, and preemption will not be reenabled until the n-th
@@ -89,7 +98,7 @@ So use this implicit preemption-disabling property only if you know that the
affected codepath does not do any of this. Best policy is to use this only for
small, atomic code that you wrote and which calls no complex functions.
-Example:
+Example::
cpucache_t *cc; /* this is per-CPU */
preempt_disable();
@@ -102,7 +111,7 @@ Example:
return 0;
Notice how the preemption statements must encompass every reference of the
-critical variables. Another example:
+critical variables. Another example::
int buf[NR_CPUS];
set_cpu_val(buf);
@@ -114,7 +123,8 @@ This code is not preempt-safe, but see how easily we can fix it by simply
moving the spin_lock up two lines.
-PREVENTING PREEMPTION USING INTERRUPT DISABLING
+Preventing preemption using interrupt disabling
+===============================================
It is possible to prevent a preemption event using local_irq_disable and
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 619cdffa5d44..65ea5915178b 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -1,5 +1,18 @@
-If variable is of Type, use printk format specifier:
----------------------------------------------------------
+=========================================
+How to get printk format specifiers right
+=========================================
+
+:Author: Randy Dunlap <rdunlap@infradead.org>
+:Author: Andrew Murray <amurray@mpc-data.co.uk>
+
+
+Integer types
+=============
+
+::
+
+ If variable is of Type, use printk format specifier:
+ ------------------------------------------------------------
int %d or %x
unsigned int %u or %x
long %ld or %lx
@@ -13,25 +26,29 @@ If variable is of Type, use printk format specifier:
s64 %lld or %llx
u64 %llu or %llx
-If <type> is dependent on a config option for its size (e.g., sector_t,
-blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
-format specifier of its largest possible type and explicitly cast to it.
-Example:
+If <type> is dependent on a config option for its size (e.g., ``sector_t``,
+``blkcnt_t``) or is architecture-dependent for its size (e.g., ``tcflag_t``),
+use a format specifier of its largest possible type and explicitly cast to it.
+
+Example::
printk("test: sector number/total blocks: %llu/%llu\n",
(unsigned long long)sector, (unsigned long long)blockcount);
-Reminder: sizeof() result is of type size_t.
+Reminder: ``sizeof()`` result is of type ``size_t``.
-The kernel's printf does not support %n. For obvious reasons, floating
-point formats (%e, %f, %g, %a) are also not recognized. Use of any
+The kernel's printf does not support ``%n``. For obvious reasons, floating
+point formats (``%e, %f, %g, %a``) are also not recognized. Use of any
unsupported specifier or length qualifier results in a WARN and early
return from vsnprintf.
Raw pointer value SHOULD be printed with %p. The kernel supports
the following extended format specifiers for pointer types:
-Symbols/Function Pointers:
+Symbols/Function Pointers
+=========================
+
+::
%pF versatile_init+0x0/0x110
%pf versatile_init
@@ -41,99 +58,122 @@ Symbols/Function Pointers:
%ps versatile_init
%pB prev_fn_of_versatile_init+0x88/0x88
- For printing symbols and function pointers. The 'S' and 's' specifiers
- result in the symbol name with ('S') or without ('s') offsets. Where
- this is used on a kernel without KALLSYMS - the symbol address is
- printed instead.
+For printing symbols and function pointers. The ``S`` and ``s`` specifiers
+result in the symbol name with (``S``) or without (``s``) offsets. Where
+this is used on a kernel without KALLSYMS - the symbol address is
+printed instead.
+
+The ``B`` specifier results in the symbol name with offsets and should be
+used when printing stack backtraces. The specifier takes into
+consideration the effect of compiler optimisations which may occur
+when tail-calls are used and marked with the noreturn GCC attribute.
- The 'B' specifier results in the symbol name with offsets and should be
- used when printing stack backtraces. The specifier takes into
- consideration the effect of compiler optimisations which may occur
- when tail-call's are used and marked with the noreturn GCC attribute.
+On ia64, ppc64 and parisc64 architectures function pointers are
+actually function descriptors which must first be resolved. The ``F`` and
+``f`` specifiers perform this resolution and then provide the same
+functionality as the ``S`` and ``s`` specifiers.
- On ia64, ppc64 and parisc64 architectures function pointers are
- actually function descriptors which must first be resolved. The 'F' and
- 'f' specifiers perform this resolution and then provide the same
- functionality as the 'S' and 's' specifiers.
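+
+A brief usage sketch (``my_handler`` is a stand-in for any function
+pointer; ``_RET_IP_`` expands to the caller's return address)::
+
+    printk("invoking %pf, called from %pS\n",
+           my_handler, (void *)_RET_IP_);
+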
+Kernel Pointers
+===============
-Kernel Pointers:
+::
%pK 0x01234567 or 0x0123456789abcdef
- For printing kernel pointers which should be hidden from unprivileged
- users. The behaviour of %pK depends on the kptr_restrict sysctl - see
- Documentation/sysctl/kernel.txt for more details.
+For printing kernel pointers which should be hidden from unprivileged
+users. The behaviour of ``%pK`` depends on the ``kptr_restrict`` sysctl - see
+Documentation/sysctl/kernel.txt for more details.
+
+Struct Resources
+================
-Struct Resources:
+::
%pr [mem 0x60000000-0x6fffffff flags 0x2200] or
[mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
%pR [mem 0x60000000-0x6fffffff pref] or
[mem 0x0000000060000000-0x000000006fffffff pref]
- For printing struct resources. The 'R' and 'r' specifiers result in a
- printed resource with ('R') or without ('r') a decoded flags member.
- Passed by reference.
+For printing struct resources. The ``R`` and ``r`` specifiers result in a
+printed resource with (``R``) or without (``r``) a decoded flags member.
+Passed by reference.
+
+Physical addresses types ``phys_addr_t``
+========================================
-Physical addresses types phys_addr_t:
+::
%pa[p] 0x01234567 or 0x0123456789abcdef
- For printing a phys_addr_t type (and its derivatives, such as
- resource_size_t) which can vary based on build options, regardless of
- the width of the CPU data path. Passed by reference.
+For printing a ``phys_addr_t`` type (and its derivatives, such as
+``resource_size_t``) which can vary based on build options, regardless of
+the width of the CPU data path. Passed by reference.
-DMA addresses types dma_addr_t:
+DMA addresses types ``dma_addr_t``
+==================================
+
+::
%pad 0x01234567 or 0x0123456789abcdef
- For printing a dma_addr_t type which can vary based on build options,
- regardless of the width of the CPU data path. Passed by reference.
+For printing a ``dma_addr_t`` type which can vary based on build options,
+regardless of the width of the CPU data path. Passed by reference.
+
+Raw buffer as an escaped string
+===============================
-Raw buffer as an escaped string:
+::
%*pE[achnops]
- For printing raw buffer as an escaped string. For the following buffer
+For printing a raw buffer as an escaped string. For the following buffer::
1b 62 20 5c 43 07 22 90 0d 5d
- few examples show how the conversion would be done (the result string
- without surrounding quotes):
+a few examples show how the conversion would be done (the result string
+without surrounding quotes)::
%*pE "\eb \C\a"\220\r]"
%*pEhp "\x1bb \C\x07"\x90\x0d]"
%*pEa "\e\142\040\\\103\a\042\220\r\135"
- The conversion rules are applied according to an optional combination
- of flags (see string_escape_mem() kernel documentation for the
- details):
- a - ESCAPE_ANY
- c - ESCAPE_SPECIAL
- h - ESCAPE_HEX
- n - ESCAPE_NULL
- o - ESCAPE_OCTAL
- p - ESCAPE_NP
- s - ESCAPE_SPACE
- By default ESCAPE_ANY_NP is used.
+The conversion rules are applied according to an optional combination
+of flags (see :c:func:`string_escape_mem` kernel documentation for the
+details):
+
+ - ``a`` - ESCAPE_ANY
+ - ``c`` - ESCAPE_SPECIAL
+ - ``h`` - ESCAPE_HEX
+ - ``n`` - ESCAPE_NULL
+ - ``o`` - ESCAPE_OCTAL
+ - ``p`` - ESCAPE_NP
+ - ``s`` - ESCAPE_SPACE
- ESCAPE_ANY_NP is the sane choice for many cases, in particularly for
- printing SSIDs.
+By default ESCAPE_ANY_NP is used.
- If field width is omitted the 1 byte only will be escaped.
+ESCAPE_ANY_NP is the sane choice for many cases, in particular for
+printing SSIDs.
-Raw buffer as a hex string:
+If field width is omitted, only one byte will be escaped.
+
+Raw buffer as a hex string
+==========================
+
+::
%*ph 00 01 02 ... 3f
%*phC 00:01:02: ... :3f
%*phD 00-01-02- ... -3f
%*phN 000102 ... 3f
- For printing a small buffers (up to 64 bytes long) as a hex string with
- certain separator. For the larger buffers consider to use
- print_hex_dump().
+For printing small buffers (up to 64 bytes long) as a hex string with a
+certain separator. For larger buffers consider using
+:c:func:`print_hex_dump`.
+
+MAC/FDDI addresses
+==================
-MAC/FDDI addresses:
+::
%pM 00:01:02:03:04:05
%pMR 05:04:03:02:01:00
@@ -141,53 +181,62 @@ MAC/FDDI addresses:
%pm 000102030405
%pmR 050403020100
- For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm'
- specifiers result in a printed address with ('M') or without ('m') byte
- separators. The default byte separator is the colon (':').
+For printing 6-byte MAC/FDDI addresses in hex notation. The ``M`` and ``m``
+specifiers result in a printed address with (``M``) or without (``m``) byte
+separators. The default byte separator is the colon (``:``).
- Where FDDI addresses are concerned the 'F' specifier can be used after
- the 'M' specifier to use dash ('-') separators instead of the default
- separator.
+Where FDDI addresses are concerned the ``F`` specifier can be used after
+the ``M`` specifier to use dash (``-``) separators instead of the default
+separator.
- For Bluetooth addresses the 'R' specifier shall be used after the 'M'
- specifier to use reversed byte order suitable for visual interpretation
- of Bluetooth addresses which are in the little endian order.
+For Bluetooth addresses the ``R`` specifier shall be used after the ``M``
+specifier to use reversed byte order suitable for visual interpretation
+of Bluetooth addresses which are in the little endian order.
- Passed by reference.
+Passed by reference.
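+
+A short usage sketch (the address below is made up)::
+
+    u8 mac[ETH_ALEN] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
+
+    printk("MAC address: %pM\n", mac);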
+
+IPv4 addresses
+==============
-IPv4 addresses:
+::
%pI4 1.2.3.4
%pi4 001.002.003.004
%p[Ii]4[hnbl]
- For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
- specifiers result in a printed address with ('i4') or without ('I4')
- leading zeros.
+For printing IPv4 dot-separated decimal addresses. The ``I4`` and ``i4``
+specifiers result in a printed address with (``i4``) or without (``I4``)
+leading zeros.
- The additional 'h', 'n', 'b', and 'l' specifiers are used to specify
- host, network, big or little endian order addresses respectively. Where
- no specifier is provided the default network/big endian order is used.
+The additional ``h``, ``n``, ``b``, and ``l`` specifiers are used to specify
+host, network, big or little endian order addresses respectively. Where
+no specifier is provided the default network/big endian order is used.
- Passed by reference.
+Passed by reference.
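+
+For example (the value is invented)::
+
+    __be32 addr = cpu_to_be32(0xc0a80001);
+
+    printk("IP = %pI4\n", &addr);   /* prints 192.168.0.1 */
+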
-IPv6 addresses:
+IPv6 addresses
+==============
+
+::
%pI6 0001:0002:0003:0004:0005:0006:0007:0008
%pi6 00010002000300040005000600070008
%pI6c 1:2:3:4:5:6:7:8
- For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6'
- specifiers result in a printed address with ('I6') or without ('i6')
- colon-separators. Leading zeros are always used.
+For printing IPv6 network-order 16-bit hex addresses. The ``I6`` and ``i6``
+specifiers result in a printed address with (``I6``) or without (``i6``)
+colon-separators. Leading zeros are always used.
- The additional 'c' specifier can be used with the 'I' specifier to
- print a compressed IPv6 address as described by
- http://tools.ietf.org/html/rfc5952
+The additional ``c`` specifier can be used with the ``I`` specifier to
+print a compressed IPv6 address as described by
+http://tools.ietf.org/html/rfc5952
- Passed by reference.
+Passed by reference.
-IPv4/IPv6 addresses (generic, with port, flowinfo, scope):
+IPv4/IPv6 addresses (generic, with port, flowinfo, scope)
+=========================================================
+
+::
%pIS 1.2.3.4 or 0001:0002:0003:0004:0005:0006:0007:0008
%piS 001.002.003.004 or 00010002000300040005000600070008
@@ -195,87 +244,103 @@ IPv4/IPv6 addresses (generic, with port, flowinfo, scope):
%pISpc 1.2.3.4:12345 or [1:2:3:4:5:6:7:8]:12345
%p[Ii]S[pfschnbl]
- For printing an IP address without the need to distinguish whether it's
- of type AF_INET or AF_INET6, a pointer to a valid 'struct sockaddr',
- specified through 'IS' or 'iS', can be passed to this format specifier.
+For printing an IP address without the need to distinguish whether it's
+of type AF_INET or AF_INET6, a pointer to a valid ``struct sockaddr``,
+specified through ``IS`` or ``iS``, can be passed to this format specifier.
- The additional 'p', 'f', and 's' specifiers are used to specify port
- (IPv4, IPv6), flowinfo (IPv6) and scope (IPv6). Ports have a ':' prefix,
- flowinfo a '/' and scope a '%', each followed by the actual value.
+The additional ``p``, ``f``, and ``s`` specifiers are used to specify port
+(IPv4, IPv6), flowinfo (IPv6) and scope (IPv6). Ports have a ``:`` prefix,
+flowinfo a ``/`` and scope a ``%``, each followed by the actual value.
- In case of an IPv6 address the compressed IPv6 address as described by
- http://tools.ietf.org/html/rfc5952 is being used if the additional
- specifier 'c' is given. The IPv6 address is surrounded by '[', ']' in
- case of additional specifiers 'p', 'f' or 's' as suggested by
- https://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-07
+In case of an IPv6 address the compressed IPv6 address as described by
+http://tools.ietf.org/html/rfc5952 is used if the additional
+specifier ``c`` is given. The IPv6 address is surrounded by ``[``, ``]`` in
+case of additional specifiers ``p``, ``f`` or ``s`` as suggested by
+https://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-07
- In case of IPv4 addresses, the additional 'h', 'n', 'b', and 'l'
- specifiers can be used as well and are ignored in case of an IPv6
- address.
+In case of IPv4 addresses, the additional ``h``, ``n``, ``b``, and ``l``
+specifiers can be used as well and are ignored in case of an IPv6
+address.
- Passed by reference.
+Passed by reference.
- Further examples:
+Further examples::
%pISfc 1.2.3.4 or [1:2:3:4:5:6:7:8]/123456789
%pISsc 1.2.3.4 or [1:2:3:4:5:6:7:8]%1234567890
%pISpfc 1.2.3.4:12345 or [1:2:3:4:5:6:7:8]:12345/123456789
-UUID/GUID addresses:
+UUID/GUID addresses
+===================
+
+::
%pUb 00010203-0405-0607-0809-0a0b0c0d0e0f
%pUB 00010203-0405-0607-0809-0A0B0C0D0E0F
%pUl 03020100-0504-0706-0809-0a0b0c0e0e0f
%pUL 03020100-0504-0706-0809-0A0B0C0E0E0F
- For printing 16-byte UUID/GUIDs addresses. The additional 'l', 'L',
- 'b' and 'B' specifiers are used to specify a little endian order in
- lower ('l') or upper case ('L') hex characters - and big endian order
- in lower ('b') or upper case ('B') hex characters.
+For printing 16-byte UUID/GUID addresses. The additional ``l``, ``L``,
+``b`` and ``B`` specifiers are used to specify a little endian order in
+lower (``l``) or upper case (``L``) hex characters - and big endian order
+in lower (``b``) or upper case (``B``) hex characters.
- Where no additional specifiers are used the default big endian
- order with lower case hex characters will be printed.
+Where no additional specifiers are used the default big endian
+order with lower case hex characters will be printed.
- Passed by reference.
+Passed by reference.
+
+dentry names
+============
-dentry names:
+::
%pd{,2,3,4}
%pD{,2,3,4}
- For printing dentry name; if we race with d_move(), the name might be
- a mix of old and new ones, but it won't oops. %pd dentry is a safer
- equivalent of %s dentry->d_name.name we used to use, %pd<n> prints
- n last components. %pD does the same thing for struct file.
+For printing dentry name; if we race with :c:func:`d_move`, the name might be
+a mix of old and new ones, but it won't oops. ``%pd`` dentry is a safer
+equivalent of ``%s`` ``dentry->d_name.name`` we used to use, ``%pd<n>`` prints
+``n`` last components. ``%pD`` does the same thing for struct file.
- Passed by reference.
+Passed by reference.
-block_device names:
+block_device names
+==================
+
+::
%pg sda, sda1 or loop0p1
+For printing the name of block_device pointers.
+For printing name of block_device pointers.
+
+struct va_format
+================
-struct va_format:
+::
%pV
- For printing struct va_format structures. These contain a format string
- and va_list as follows:
+For printing struct va_format structures. These contain a format string
+and va_list as follows::
struct va_format {
const char *fmt;
va_list *va;
};
- Implements a "recursive vsnprintf".
+Implements a "recursive vsnprintf".
- Do not use this feature without some mechanism to verify the
- correctness of the format string and va_list arguments.
+Do not use this feature without some mechanism to verify the
+correctness of the format string and va_list arguments.
- Passed by reference.
+Passed by reference.
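+
+A sketch of the usual pattern inside a variadic helper (the ``fmt``
+argument name is illustrative)::
+
+    struct va_format vaf;
+    va_list args;
+
+    va_start(args, fmt);
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    printk("%s: %pV", __func__, &vaf);
+    va_end(args);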
+
+kobjects
+========
+
+::
-kobjects:
%pO
Base specifier for kobject based structs. Must be followed with
@@ -311,61 +376,70 @@ kobjects:
Passed by reference.
-struct clk:
+
+struct clk
+==========
+
+::
%pC pll1
%pCn pll1
%pCr 1560000000
- For printing struct clk structures. '%pC' and '%pCn' print the name
- (Common Clock Framework) or address (legacy clock framework) of the
- structure; '%pCr' prints the current clock rate.
+For printing struct clk structures. ``%pC`` and ``%pCn`` print the name
+(Common Clock Framework) or address (legacy clock framework) of the
+structure; ``%pCr`` prints the current clock rate.
- Passed by reference.
+Passed by reference.
-bitmap and its derivatives such as cpumask and nodemask:
+bitmap and its derivatives such as cpumask and nodemask
+=======================================================
+
+::
%*pb 0779
%*pbl 0,3-6,8-10
- For printing bitmap and its derivatives such as cpumask and nodemask,
- %*pb output the bitmap with field width as the number of bits and %*pbl
- output the bitmap as range list with field width as the number of bits.
+For printing bitmap and its derivatives such as cpumask and nodemask,
+``%*pb`` outputs the bitmap with field width as the number of bits and
+``%*pbl`` outputs the bitmap as a range list with field width as the
+number of bits.
- Passed by reference.
+Passed by reference.
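+
+For example, a cpumask can be printed via the cpumask_pr_args() helper,
+which expands to the width/pointer pair these specifiers expect::
+
+    printk("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));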
+
+Flags bitfields such as page flags, gfp_flags
+=============================================
-Flags bitfields such as page flags, gfp_flags:
+::
%pGp referenced|uptodate|lru|active|private
%pGg GFP_USER|GFP_DMA32|GFP_NOWARN
%pGv read|exec|mayread|maywrite|mayexec|denywrite
- For printing flags bitfields as a collection of symbolic constants that
- would construct the value. The type of flags is given by the third
- character. Currently supported are [p]age flags, [v]ma_flags (both
- expect unsigned long *) and [g]fp_flags (expects gfp_t *). The flag
- names and print order depends on the particular type.
+For printing flags bitfields as a collection of symbolic constants that
+would construct the value. The type of flags is given by the third
+character. Currently supported are [p]age flags, [v]ma_flags (both
+expect ``unsigned long *``) and [g]fp_flags (expects ``gfp_t *``). The flag
+names and print order depend on the particular type.
- Note that this format should not be used directly in TP_printk() part
- of a tracepoint. Instead, use the show_*_flags() functions from
- <trace/events/mmflags.h>.
+Note that this format should not be used directly in the :c:func:`TP_printk` part
+of a tracepoint. Instead, use the ``show_*_flags()`` functions from
+<trace/events/mmflags.h>.
- Passed by reference.
+Passed by reference.
+
+Network device features
+=======================
-Network device features:
+::
%pNF 0x000000000000c000
- For printing netdev_features_t.
+For printing netdev_features_t.
- Passed by reference.
+Passed by reference.
-If you add other %p extensions, please extend lib/test_printf.c with
+If you add other ``%p`` extensions, please extend lib/test_printf.c with
one or more test cases, if at all feasible.
Thank you for your cooperation and attention.
-
-
-By Randy Dunlap <rdunlap@infradead.org> and
-Andrew Murray <amurray@mpc-data.co.uk>
diff --git a/Documentation/pwm.txt b/Documentation/pwm.txt
index 789b27c6ec99..8fbf0aa3ba2d 100644
--- a/Documentation/pwm.txt
+++ b/Documentation/pwm.txt
@@ -1,4 +1,6 @@
+======================================
Pulse Width Modulation (PWM) interface
+======================================
This provides an overview of the Linux PWM interface
@@ -16,7 +18,7 @@ Users of the legacy PWM API use unique IDs to refer to PWM devices.
Instead of referring to a PWM device via its unique ID, board setup code
should instead register a static mapping that can be used to match PWM
-consumers to providers, as given in the following example:
+consumers to providers, as given in the following example::
static struct pwm_lookup board_pwm_lookup[] = {
PWM_LOOKUP("tegra-pwm", 0, "pwm-backlight", NULL,
@@ -40,9 +42,9 @@ New users should use the pwm_get() function and pass to it the consumer
device or a consumer name. pwm_put() is used to free the PWM device. Managed
variants of these functions, devm_pwm_get() and devm_pwm_put(), also exist.
-After being requested, a PWM has to be configured using:
+After being requested, a PWM has to be configured using::
-int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
This API controls both the PWM period/duty_cycle config and the
enable/disable state.
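+
+A hedged usage sketch (error handling trimmed; a NULL consumer name is
+used for brevity)::
+
+    struct pwm_device *pwm = pwm_get(dev, NULL);
+    struct pwm_state state;
+
+    pwm_init_state(pwm, &state);
+    state.period = 1000000;              /* 1 ms, purely illustrative */
+    state.duty_cycle = state.period / 2; /* 50% duty cycle */
+    state.enabled = true;
+    pwm_apply_state(pwm, &state);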
@@ -72,11 +74,14 @@ interface is provided to use the PWMs from userspace. It is exposed at
pwmchipN, where N is the base of the PWM chip. Inside the directory you
will find:
-npwm - The number of PWM channels this chip supports (read-only).
+ npwm
+ The number of PWM channels this chip supports (read-only).
-export - Exports a PWM channel for use with sysfs (write-only).
+ export
+ Exports a PWM channel for use with sysfs (write-only).
-unexport - Unexports a PWM channel from sysfs (write-only).
+ unexport
+ Unexports a PWM channel from sysfs (write-only).
The PWM channels are numbered using a per-chip index from 0 to npwm-1.
@@ -84,21 +89,26 @@ When a PWM channel is exported a pwmX directory will be created in the
pwmchipN directory it is associated with, where X is the number of the
channel that was exported. The following properties will then be available:
-period - The total period of the PWM signal (read/write).
- Value is in nanoseconds and is the sum of the active and inactive
- time of the PWM.
+ period
+ The total period of the PWM signal (read/write).
+ Value is in nanoseconds and is the sum of the active and inactive
+ time of the PWM.
-duty_cycle - The active time of the PWM signal (read/write).
- Value is in nanoseconds and must be less than the period.
+ duty_cycle
+ The active time of the PWM signal (read/write).
+ Value is in nanoseconds and must be less than the period.
-polarity - Changes the polarity of the PWM signal (read/write).
- Writes to this property only work if the PWM chip supports changing
- the polarity. The polarity can only be changed if the PWM is not
- enabled. Value is the string "normal" or "inversed".
+ polarity
+ Changes the polarity of the PWM signal (read/write).
+ Writes to this property only work if the PWM chip supports changing
+ the polarity. The polarity can only be changed if the PWM is not
+ enabled. Value is the string "normal" or "inversed".
-enable - Enable/disable the PWM signal (read/write).
- 0 - disabled
- 1 - enabled
+ enable
+ Enable/disable the PWM signal (read/write).
+
+ - 0 - disabled
+ - 1 - enabled
Implementing a PWM driver
-------------------------
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index b9d9cc57be18..b8a8c70b0188 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -1,7 +1,10 @@
+=================================
Red-black Trees (rbtree) in Linux
-January 18, 2007
-Rob Landley <rob@landley.net>
-=============================
+=================================
+
+
+:Date: January 18, 2007
+:Author: Rob Landley <rob@landley.net>
What are red-black trees, and what are they for?
------------------------------------------------
@@ -56,7 +59,7 @@ user of the rbtree code.
Creating a new rbtree
---------------------
-Data nodes in an rbtree tree are structures containing a struct rb_node member:
+Data nodes in an rbtree tree are structures containing a struct rb_node member::
struct mytype {
struct rb_node node;
@@ -78,7 +81,7 @@ Searching for a value in an rbtree
Writing a search function for your tree is fairly straightforward: start at the
root, compare each value, and follow the left or right branch as necessary.
-Example:
+Example::
struct mytype *my_search(struct rb_root *root, char *string)
{
@@ -110,7 +113,7 @@ The search for insertion differs from the previous search by finding the
location of the pointer on which to graft the new node. The new node also
needs a link to its parent node for rebalancing purposes.
-Example:
+Example::
int my_insert(struct rb_root *root, struct mytype *data)
{
@@ -140,11 +143,11 @@ Example:
Removing or replacing existing data in an rbtree
------------------------------------------------
-To remove an existing node from a tree, call:
+To remove an existing node from a tree, call::
void rb_erase(struct rb_node *victim, struct rb_root *tree);
-Example:
+Example::
struct mytype *data = mysearch(&mytree, "walrus");
@@ -153,7 +156,7 @@ Example:
myfree(data);
}
-To replace an existing node in a tree with a new one with the same key, call:
+To replace an existing node in a tree with a new one with the same key, call::
void rb_replace_node(struct rb_node *old, struct rb_node *new,
struct rb_root *tree);
@@ -166,7 +169,7 @@ Iterating through the elements stored in an rbtree (in sort order)
Four functions are provided for iterating through an rbtree's contents in
sorted order. These work on arbitrary trees, and should not need to be
-modified or wrapped (except for locking purposes):
+modified or wrapped (except for locking purposes)::
struct rb_node *rb_first(struct rb_root *tree);
struct rb_node *rb_last(struct rb_root *tree);
@@ -184,7 +187,7 @@ which the containing data structure may be accessed with the container_of()
macro, and individual members may be accessed directly via
rb_entry(node, type, member).
-Example:
+Example::
struct rb_node *node;
for (node = rb_first(&mytree); node; node = rb_next(node))
@@ -241,7 +244,8 @@ user should have a single rb_erase_augmented() call site in order to limit
compiled code size.
-Sample usage:
+Sample usage
+^^^^^^^^^^^^
Interval tree is an example of augmented rb tree. Reference -
"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein.
@@ -259,12 +263,12 @@ This "extra information" stored in each node is the maximum hi
information can be maintained at each node just by looking at the node
and its immediate children. And this will be used in O(log n) lookup
for lowest match (lowest start address among all possible matches)
-with something like:
+with something like::
-struct interval_tree_node *
-interval_tree_first_match(struct rb_root *root,
- unsigned long start, unsigned long last)
-{
+ struct interval_tree_node *
+ interval_tree_first_match(struct rb_root *root,
+ unsigned long start, unsigned long last)
+ {
struct interval_tree_node *node;
if (!root->rb_node)
@@ -301,13 +305,13 @@ interval_tree_first_match(struct rb_root *root,
}
return NULL; /* No match */
}
-}
+ }
-Insertion/removal are defined using the following augmented callbacks:
+Insertion/removal are defined using the following augmented callbacks::
-static inline unsigned long
-compute_subtree_last(struct interval_tree_node *node)
-{
+ static inline unsigned long
+ compute_subtree_last(struct interval_tree_node *node)
+ {
unsigned long max = node->last, subtree_last;
if (node->rb.rb_left) {
subtree_last = rb_entry(node->rb.rb_left,
@@ -322,10 +326,10 @@ compute_subtree_last(struct interval_tree_node *node)
max = subtree_last;
}
return max;
-}
+ }
-static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
-{
+ static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
+ {
while (rb != stop) {
struct interval_tree_node *node =
rb_entry(rb, struct interval_tree_node, rb);
@@ -335,20 +339,20 @@ static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
node->__subtree_last = subtree_last;
rb = rb_parent(&node->rb);
}
-}
+ }
-static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
-{
+ static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
+ {
struct interval_tree_node *old =
rb_entry(rb_old, struct interval_tree_node, rb);
struct interval_tree_node *new =
rb_entry(rb_new, struct interval_tree_node, rb);
new->__subtree_last = old->__subtree_last;
-}
+ }
-static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
-{
+ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
+ {
struct interval_tree_node *old =
rb_entry(rb_old, struct interval_tree_node, rb);
struct interval_tree_node *new =
@@ -356,15 +360,15 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
new->__subtree_last = old->__subtree_last;
old->__subtree_last = compute_subtree_last(old);
-}
+ }
-static const struct rb_augment_callbacks augment_callbacks = {
+ static const struct rb_augment_callbacks augment_callbacks = {
augment_propagate, augment_copy, augment_rotate
-};
+ };
-void interval_tree_insert(struct interval_tree_node *node,
- struct rb_root *root)
-{
+ void interval_tree_insert(struct interval_tree_node *node,
+ struct rb_root *root)
+ {
struct rb_node **link = &root->rb_node, *rb_parent = NULL;
unsigned long start = node->start, last = node->last;
struct interval_tree_node *parent;
@@ -383,10 +387,10 @@ void interval_tree_insert(struct interval_tree_node *node,
node->__subtree_last = last;
rb_link_node(&node->rb, rb_parent, link);
rb_insert_augmented(&node->rb, root, &augment_callbacks);
-}
+ }
-void interval_tree_remove(struct interval_tree_node *node,
- struct rb_root *root)
-{
+ void interval_tree_remove(struct interval_tree_node *node,
+ struct rb_root *root)
+ {
rb_erase_augmented(&node->rb, root, &augment_callbacks);
-}
+ }
diff --git a/Documentation/remoteproc.txt b/Documentation/remoteproc.txt
index f07597482351..77fb03acdbb4 100644
--- a/Documentation/remoteproc.txt
+++ b/Documentation/remoteproc.txt
@@ -1,6 +1,9 @@
+==========================
Remote Processor Framework
+==========================
-1. Introduction
+Introduction
+============
Modern SoCs typically have heterogeneous remote processor devices in asymmetric
multiprocessing (AMP) configurations, which may be running different instances
@@ -26,44 +29,62 @@ remoteproc will add those devices. This makes it possible to reuse the
existing virtio drivers with remote processor backends at a minimal development
cost.
-2. User API
+User API
+========
+
+::
int rproc_boot(struct rproc *rproc)
- - Boot a remote processor (i.e. load its firmware, power it on, ...).
- If the remote processor is already powered on, this function immediately
- returns (successfully).
- Returns 0 on success, and an appropriate error value otherwise.
- Note: to use this function you should already have a valid rproc
- handle. There are several ways to achieve that cleanly (devres, pdata,
- the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we
- might also consider using dev_archdata for this).
+
+Boot a remote processor (i.e. load its firmware, power it on, ...).
+
+If the remote processor is already powered on, this function immediately
+returns (successfully).
+
+Returns 0 on success, and an appropriate error value otherwise.
+Note: to use this function you should already have a valid rproc
+handle. There are several ways to achieve that cleanly (devres, pdata,
+the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we
+might also consider using dev_archdata for this).
+
+::
void rproc_shutdown(struct rproc *rproc)
- - Power off a remote processor (previously booted with rproc_boot()).
- In case @rproc is still being used by an additional user(s), then
- this function will just decrement the power refcount and exit,
- without really powering off the device.
- Every call to rproc_boot() must (eventually) be accompanied by a call
- to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
- Notes:
- - we're not decrementing the rproc's refcount, only the power refcount.
- which means that the @rproc handle stays valid even after
- rproc_shutdown() returns, and users can still use it with a subsequent
- rproc_boot(), if needed.
+
+Power off a remote processor (previously booted with rproc_boot()).
+In case @rproc is still being used by an additional user(s), then
+this function will just decrement the power refcount and exit,
+without really powering off the device.
+
+Every call to rproc_boot() must (eventually) be accompanied by a call
+to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
+
+.. note::
+
+ we're not decrementing the rproc's refcount, only the power refcount,
+ which means that the @rproc handle stays valid even after
+ rproc_shutdown() returns, and users can still use it with a subsequent
+ rproc_boot(), if needed.
+
+::
struct rproc *rproc_get_by_phandle(phandle phandle)
- - Find an rproc handle using a device tree phandle. Returns the rproc
- handle on success, and NULL on failure. This function increments
- the remote processor's refcount, so always use rproc_put() to
- decrement it back once rproc isn't needed anymore.
-3. Typical usage
+Find an rproc handle using a device tree phandle. Returns the rproc
+handle on success, and NULL on failure. This function increments
+the remote processor's refcount, so always use rproc_put() to
+decrement it back once rproc isn't needed anymore.
+
+Typical usage
+=============
-#include <linux/remoteproc.h>
+::
-/* in case we were given a valid 'rproc' handle */
-int dummy_rproc_example(struct rproc *my_rproc)
-{
+ #include <linux/remoteproc.h>
+
+ /* in case we were given a valid 'rproc' handle */
+ int dummy_rproc_example(struct rproc *my_rproc)
+ {
int ret;
/* let's power on and boot our remote processor */
@@ -80,84 +101,111 @@ int dummy_rproc_example(struct rproc *my_rproc)
/* let's shut it down now */
rproc_shutdown(my_rproc);
-}
+ }
+
+API for implementors
+====================
-4. API for implementors
+::
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len)
- - Allocate a new remote processor handle, but don't register
- it yet. Required parameters are the underlying device, the
- name of this remote processor, platform-specific ops handlers,
- the name of the firmware to boot this rproc with, and the
- length of private data needed by the allocating rproc driver (in bytes).
-
- This function should be used by rproc implementations during
- initialization of the remote processor.
- After creating an rproc handle using this function, and when ready,
- implementations should then call rproc_add() to complete
- the registration of the remote processor.
- On success, the new rproc is returned, and on failure, NULL.
-
- Note: _never_ directly deallocate @rproc, even if it was not registered
- yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
+
+Allocate a new remote processor handle, but don't register
+it yet. Required parameters are the underlying device, the
+name of this remote processor, platform-specific ops handlers,
+the name of the firmware to boot this rproc with, and the
+length of private data needed by the allocating rproc driver (in bytes).
+
+This function should be used by rproc implementations during
+initialization of the remote processor.
+
+After creating an rproc handle using this function, and when ready,
+implementations should then call rproc_add() to complete
+the registration of the remote processor.
+
+On success, the new rproc is returned, and on failure, NULL.
+
+.. note::
+
+ **never** directly deallocate @rproc, even if it was not registered
+ yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
+
+::
void rproc_free(struct rproc *rproc)
- - Free an rproc handle that was allocated by rproc_alloc.
- This function essentially unrolls rproc_alloc(), by decrementing the
- rproc's refcount. It doesn't directly free rproc; that would happen
- only if there are no other references to rproc and its refcount now
- dropped to zero.
+
+Free an rproc handle that was allocated by rproc_alloc.
+
+This function essentially unrolls rproc_alloc(), by decrementing the
+rproc's refcount. It doesn't directly free rproc; that would happen
+only if there are no other references to rproc and its refcount now
+dropped to zero.
+
+::
int rproc_add(struct rproc *rproc)
- - Register @rproc with the remoteproc framework, after it has been
- allocated with rproc_alloc().
- This is called by the platform-specific rproc implementation, whenever
- a new remote processor device is probed.
- Returns 0 on success and an appropriate error code otherwise.
- Note: this function initiates an asynchronous firmware loading
- context, which will look for virtio devices supported by the rproc's
- firmware.
- If found, those virtio devices will be created and added, so as a result
- of registering this remote processor, additional virtio drivers might get
- probed.
+
+Register @rproc with the remoteproc framework, after it has been
+allocated with rproc_alloc().
+
+This is called by the platform-specific rproc implementation, whenever
+a new remote processor device is probed.
+
+Returns 0 on success and an appropriate error code otherwise.
+Note: this function initiates an asynchronous firmware loading
+context, which will look for virtio devices supported by the rproc's
+firmware.
+
+If found, those virtio devices will be created and added, so as a result
+of registering this remote processor, additional virtio drivers might get
+probed.
+
+::
int rproc_del(struct rproc *rproc)
- - Unroll rproc_add().
- This function should be called when the platform specific rproc
- implementation decides to remove the rproc device. it should
- _only_ be called if a previous invocation of rproc_add()
- has completed successfully.
- After rproc_del() returns, @rproc is still valid, and its
- last refcount should be decremented by calling rproc_free().
+Unroll rproc_add().
+
+This function should be called when the platform specific rproc
+implementation decides to remove the rproc device. It should
+**only** be called if a previous invocation of rproc_add()
+has completed successfully.
- Returns 0 on success and -EINVAL if @rproc isn't valid.
+After rproc_del() returns, @rproc is still valid, and its
+last refcount should be decremented by calling rproc_free().
+
+Returns 0 on success and -EINVAL if @rproc isn't valid.
+
+::
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
- - Report a crash in a remoteproc
- This function must be called every time a crash is detected by the
- platform specific rproc implementation. This should not be called from a
- non-remoteproc driver. This function can be called from atomic/interrupt
- context.
-5. Implementation callbacks
+Report a crash in a remoteproc.
+
+This function must be called every time a crash is detected by the
+platform specific rproc implementation. This should not be called from a
+non-remoteproc driver. This function can be called from atomic/interrupt
+context.
+
+Implementation callbacks
+========================
These callbacks should be provided by platform-specific remoteproc
-drivers:
-
-/**
- * struct rproc_ops - platform-specific device handlers
- * @start: power on the device and boot it
- * @stop: power off the device
- * @kick: kick a virtqueue (virtqueue id given as a parameter)
- */
-struct rproc_ops {
+drivers::
+
+ /**
+ * struct rproc_ops - platform-specific device handlers
+ * @start: power on the device and boot it
+ * @stop: power off the device
+ * @kick: kick a virtqueue (virtqueue id given as a parameter)
+ */
+ struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
-};
+ };
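+
+A minimal sketch of such handlers (entirely illustrative; the hardware
+power/boot pokes a real driver performs are elided)::
+
+    static int my_rproc_start(struct rproc *rproc)
+    {
+        /* power up the remote core and kick off its boot here */
+        return 0;
+    }
+
+    static int my_rproc_stop(struct rproc *rproc)
+    {
+        /* power the remote core back off here */
+        return 0;
+    }
+
+    static const struct rproc_ops my_rproc_ops = {
+        .start = my_rproc_start,
+        .stop  = my_rproc_stop,
+    };
+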
Every remoteproc implementation should at least provide the ->start and ->stop
handlers. If rpmsg/virtio functionality is also desired, then the ->kick handler
@@ -179,7 +227,8 @@ the exact virtqueue index to look in is optional: it is easy (and not
too expensive) to go through the existing virtqueues and look for new buffers
in the used rings.
-6. Binary Firmware Structure
+Binary Firmware Structure
+=========================
At this point remoteproc only supports ELF32 firmware binaries. However,
it is quite expected that other platforms/devices which we'd want to
@@ -207,43 +256,43 @@ resource entries that publish the existence of supported features
or configurations by the remote processor, such as trace buffers and
supported virtio devices (and their configurations).
-The resource table begins with this header:
-
-/**
- * struct resource_table - firmware resource table header
- * @ver: version number
- * @num: number of resource entries
- * @reserved: reserved (must be zero)
- * @offset: array of offsets pointing at the various resource entries
- *
- * The header of the resource table, as expressed by this structure,
- * contains a version number (should we need to change this format in the
- * future), the number of available resource entries, and their offsets
- * in the table.
- */
-struct resource_table {
+The resource table begins with this header::
+
+ /**
+ * struct resource_table - firmware resource table header
+ * @ver: version number
+ * @num: number of resource entries
+ * @reserved: reserved (must be zero)
+ * @offset: array of offsets pointing at the various resource entries
+ *
+ * The header of the resource table, as expressed by this structure,
+ * contains a version number (should we need to change this format in the
+ * future), the number of available resource entries, and their offsets
+ * in the table.
+ */
+ struct resource_table {
u32 ver;
u32 num;
u32 reserved[2];
u32 offset[0];
-} __packed;
+ } __packed;
Immediately following this header are the resource entries themselves,
-each of which begins with the following resource entry header:
-
-/**
- * struct fw_rsc_hdr - firmware resource entry header
- * @type: resource type
- * @data: resource data
- *
- * Every resource entry begins with a 'struct fw_rsc_hdr' header providing
- * its @type. The content of the entry itself will immediately follow
- * this header, and it should be parsed according to the resource type.
- */
-struct fw_rsc_hdr {
+each of which begins with the following resource entry header::
+
+ /**
+ * struct fw_rsc_hdr - firmware resource entry header
+ * @type: resource type
+ * @data: resource data
+ *
+ * Every resource entry begins with a 'struct fw_rsc_hdr' header providing
+ * its @type. The content of the entry itself will immediately follow
+ * this header, and it should be parsed according to the resource type.
+ */
+ struct fw_rsc_hdr {
u32 type;
u8 data[0];
-} __packed;
+ } __packed;
Some resources entries are mere announcements, where the host is informed
of specific remoteproc configuration. Other entries require the host to
@@ -252,32 +301,32 @@ is expected, where the firmware requests a resource, and once allocated,
the host should provide back its details (e.g. address of an allocated
memory region).
-Here are the various resource types that are currently supported:
-
-/**
- * enum fw_resource_type - types of resource entries
- *
- * @RSC_CARVEOUT: request for allocation of a physically contiguous
- * memory region.
- * @RSC_DEVMEM: request to iommu_map a memory-based peripheral.
- * @RSC_TRACE: announces the availability of a trace buffer into which
- * the remote processor will be writing logs.
- * @RSC_VDEV: declare support for a virtio device, and serve as its
- * virtio header.
- * @RSC_LAST: just keep this one at the end
- *
- * Please note that these values are used as indices to the rproc_handle_rsc
- * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to
- * check the validity of an index before the lookup table is accessed, so
- * please update it as needed.
- */
-enum fw_resource_type {
+Here are the various resource types that are currently supported::
+
+ /**
+ * enum fw_resource_type - types of resource entries
+ *
+ * @RSC_CARVEOUT: request for allocation of a physically contiguous
+ * memory region.
+ * @RSC_DEVMEM: request to iommu_map a memory-based peripheral.
+ * @RSC_TRACE: announces the availability of a trace buffer into which
+ * the remote processor will be writing logs.
+ * @RSC_VDEV: declare support for a virtio device, and serve as its
+ * virtio header.
+ * @RSC_LAST: just keep this one at the end
+ *
+ * Please note that these values are used as indices to the rproc_handle_rsc
+ * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to
+ * check the validity of an index before the lookup table is accessed, so
+ * please update it as needed.
+ */
+ enum fw_resource_type {
RSC_CARVEOUT = 0,
RSC_DEVMEM = 1,
RSC_TRACE = 2,
RSC_VDEV = 3,
RSC_LAST = 4,
-};
+ };
For more details regarding a specific resource type, please see its
dedicated structure in include/linux/remoteproc.h.
@@ -286,7 +335,8 @@ We also expect that platform-specific resource entries will show up
at some point. When that happens, we could easily add a new RSC_PLATFORM
type, and hand those resources to the platform-specific rproc driver to handle.
-7. Virtio and remoteproc
+Virtio and remoteproc
+=====================
The firmware should provide remoteproc information about virtio devices
that it supports, and their configurations: a RSC_VDEV resource entry
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 8c174063b3f0..a289285d2412 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -1,13 +1,13 @@
+===============================
rfkill - RF kill switch support
===============================
-1. Introduction
-2. Implementation details
-3. Kernel API
-4. Userspace support
+.. contents::
+ :depth: 2
-1. Introduction
+Introduction
+============
The rfkill subsystem provides a generic interface to disabling any radio
transmitter in the system. When a transmitter is blocked, it shall not
@@ -21,17 +21,24 @@ aircraft.
The rfkill subsystem has a concept of "hard" and "soft" block, which
differ little in their meaning (block == transmitters off) but rather in
whether they can be changed or not:
- - hard block: read-only radio block that cannot be overridden by software
- - soft block: writable radio block (need not be readable) that is set by
- the system software.
+
+ - hard block
+ read-only radio block that cannot be overridden by software
+
+ - soft block
+ writable radio block (need not be readable) that is set by
+ the system software.
The rfkill subsystem has two parameters, rfkill.default_state and
-rfkill.master_switch_mode, which are documented in admin-guide/kernel-parameters.rst.
+rfkill.master_switch_mode, which are documented in
+admin-guide/kernel-parameters.rst.
-2. Implementation details
+Implementation details
+======================
The rfkill subsystem is composed of three main components:
+
* the rfkill core,
* the deprecated rfkill-input module (an input layer handler, being
replaced by userspace policy code) and
@@ -55,7 +62,8 @@ use the return value of rfkill_set_hw_state() unless the hardware actually
keeps track of soft and hard block separately.
-3. Kernel API
+Kernel API
+==========
Drivers for radio transmitters normally implement an rfkill driver.
@@ -69,7 +77,7 @@ For some platforms, it is possible that the hardware state changes during
suspend/hibernation, in which case it will be necessary to update the rfkill
core with the current state at resume time.
-To create an rfkill driver, driver's Kconfig needs to have
+To create an rfkill driver, the driver's Kconfig needs to have::
depends on RFKILL || !RFKILL
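+
+A minimal registration sketch might then look as follows (illustrative
+only; ``dev``, ``my_data`` and the ``set_block`` body are assumptions,
+see include/linux/rfkill.h for the authoritative API)::
+
+    static int my_set_block(void *data, bool blocked)
+    {
+            /* program the hardware's soft-block state here */
+            return 0;
+    }
+
+    static const struct rfkill_ops my_rfkill_ops = {
+            .set_block = my_set_block,
+    };
+
+    static int my_register(struct device *dev, void *my_data)
+    {
+            struct rfkill *rf;
+            int err;
+
+            rf = rfkill_alloc("my-wlan", dev, RFKILL_TYPE_WLAN,
+                              &my_rfkill_ops, my_data);
+            if (!rf)
+                    return -ENOMEM;
+
+            err = rfkill_register(rf);
+            if (err)
+                    rfkill_destroy(rf);
+            return err;
+    }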
@@ -87,7 +95,8 @@ RFKill provides per-switch LED triggers, which can be used to drive LEDs
according to the switch state (LED_FULL when blocked, LED_OFF otherwise).
-5. Userspace support
+Userspace support
+=================
The recommended userspace interface to use is /dev/rfkill, which is a misc
character device that allows userspace to obtain and set the state of rfkill
@@ -112,11 +121,11 @@ rfkill core framework.
Additionally, each rfkill device is registered in sysfs and emits uevents.
rfkill devices issue uevents (with an action of "change"), with the following
-environment variables set:
+environment variables set::
-RFKILL_NAME
-RFKILL_STATE
-RFKILL_TYPE
+ RFKILL_NAME
+ RFKILL_STATE
+ RFKILL_TYPE
The contents of these variables correspond to the "name", "state" and
"type" sysfs files explained above.
diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt
index 16eb314f56cc..8a5d34abf726 100644
--- a/Documentation/robust-futex-ABI.txt
+++ b/Documentation/robust-futex-ABI.txt
@@ -1,7 +1,9 @@
-Started by Paul Jackson <pj@sgi.com>
-
+====================
The robust futex ABI
---------------------
+====================
+
+:Author: Started by Paul Jackson <pj@sgi.com>
+
Robust_futexes provide a mechanism that is used in addition to normal
futexes, for kernel assist of cleanup of held locks on task exit.
@@ -32,7 +34,7 @@ probably causing deadlock or other such failure of the other threads
waiting on the same locks.
A thread that anticipates possibly using robust_futexes should first
-issue the system call:
+issue the system call::
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head, size_t len);
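+
+A minimal userspace sketch of that registration (illustrative; real
+programs normally rely on glibc/pthreads to issue this automatically)::
+
+    #include <linux/futex.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    static struct robust_list_head head;
+
+    static long register_robust_list(void)
+    {
+            head.list.next = &head.list;    /* empty circular list */
+            head.futex_offset = 0;          /* lock word offset in entries */
+            head.list_op_pending = NULL;
+            return syscall(SYS_set_robust_list, &head, sizeof(head));
+    }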
@@ -91,7 +93,7 @@ that lock using the futex mechanism.
When a thread has invoked the above system call to indicate it
anticipates using robust_futexes, the kernel stores the passed in 'head'
pointer for that task. The task may retrieve that value later on by
-using the system call:
+using the system call::
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
@@ -135,6 +137,7 @@ manipulating this list), the user code must observe the following
protocol on 'lock entry' insertion and removal:
On insertion:
+
1) set the 'list_op_pending' word to the address of the 'lock entry'
to be inserted,
2) acquire the futex lock,
@@ -143,6 +146,7 @@ On insertion:
4) clear the 'list_op_pending' word.
On removal:
+
1) set the 'list_op_pending' word to the address of the 'lock entry'
to be removed,
2) remove the lock entry for this lock from the 'head' list,
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index 61c22d608759..6c42c75103eb 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -1,4 +1,8 @@
-Started by: Ingo Molnar <mingo@redhat.com>
+========================================
+A description of what robust futexes are
+========================================
+
+:Started by: Ingo Molnar <mingo@redhat.com>
Background
----------
@@ -163,7 +167,7 @@ Implementation details
----------------------
The patch adds two new syscalls: one to register the userspace list, and
-one to query the registered list pointer:
+one to query the registered list pointer::
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
@@ -185,7 +189,7 @@ straightforward. The kernel doesn't have any internal distinction between
robust and normal futexes.
If a futex is found to be held at exit time, the kernel sets the
-following bit of the futex word:
+following bit of the futex word::
#define FUTEX_OWNER_DIED 0x40000000
@@ -193,7 +197,7 @@ and wakes up the next futex waiter (if any). User-space does the rest of
the cleanup.
Otherwise, robust futexes are acquired by glibc by putting the TID into
-the futex field atomically. Waiters set the FUTEX_WAITERS bit:
+the futex field atomically. Waiters set the FUTEX_WAITERS bit::
#define FUTEX_WAITERS 0x80000000
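+
+A hedged sketch of that fast path (illustrative, not glibc source;
+``lock`` is assumed to point at the futex word, declared as a C11
+``_Atomic uint32_t``, and ``tid`` is the caller's thread id)::
+
+    uint32_t expected = 0;
+
+    /* uncontended case: atomically install our TID in the futex word */
+    if (!atomic_compare_exchange_strong(lock, &expected, tid)) {
+            /* contended: advertise a waiter, then sleep in the kernel
+             * via futex(FUTEX_WAIT, ...) until the owner unlocks */
+            atomic_fetch_or(lock, FUTEX_WAITERS);
+    }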
diff --git a/Documentation/rpmsg.txt b/Documentation/rpmsg.txt
index a95e36a43288..24b7a9e1a5f9 100644
--- a/Documentation/rpmsg.txt
+++ b/Documentation/rpmsg.txt
@@ -1,10 +1,15 @@
+============================================
Remote Processor Messaging (rpmsg) Framework
+============================================
-Note: this document describes the rpmsg bus and how to write rpmsg drivers.
-To learn how to add rpmsg support for new platforms, check out remoteproc.txt
-(also a resident of Documentation/).
+.. note::
-1. Introduction
+ This document describes the rpmsg bus and how to write rpmsg drivers.
+ To learn how to add rpmsg support for new platforms, check out remoteproc.txt
+ (also a resident of Documentation/).
+
+Introduction
+============
Modern SoCs typically employ heterogeneous remote processor devices in
asymmetric multiprocessing (AMP) configurations, which may be running
@@ -58,170 +63,222 @@ to their destination address (this is done by invoking the driver's rx handler
with the payload of the inbound message).
-2. User API
+User API
+========
+
+::
int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len);
- - sends a message across to the remote processor on a given channel.
- The caller should specify the channel, the data it wants to send,
- and its length (in bytes). The message will be sent on the specified
- channel, i.e. its source and destination address fields will be
- set to the channel's src and dst addresses.
-
- In case there are no TX buffers available, the function will block until
- one becomes available (i.e. until the remote processor consumes
- a tx buffer and puts it back on virtio's used descriptor ring),
- or a timeout of 15 seconds elapses. When the latter happens,
- -ERESTARTSYS is returned.
- The function can only be called from a process context (for now).
- Returns 0 on success and an appropriate error value on failure.
+
+sends a message across to the remote processor on a given channel.
+The caller should specify the channel, the data it wants to send,
+and its length (in bytes). The message will be sent on the specified
+channel, i.e. its source and destination address fields will be
+set to the channel's src and dst addresses.
+
+In case there are no TX buffers available, the function will block until
+one becomes available (i.e. until the remote processor consumes
+a tx buffer and puts it back on virtio's used descriptor ring),
+or a timeout of 15 seconds elapses. When the latter happens,
+-ERESTARTSYS is returned.
+
+The function can only be called from a process context (for now).
+Returns 0 on success and an appropriate error value on failure.
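+
+A minimal call site might look like this (illustrative)::
+
+    static const char msg[] = "hello!";
+    int ret = rpmsg_send(rpdev, (void *)msg, sizeof(msg));
+
+    if (ret)
+            dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);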
+
+::
int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst);
- - sends a message across to the remote processor on a given channel,
- to a destination address provided by the caller.
- The caller should specify the channel, the data it wants to send,
- its length (in bytes), and an explicit destination address.
- The message will then be sent to the remote processor to which the
- channel belongs, using the channel's src address, and the user-provided
- dst address (thus the channel's dst address will be ignored).
-
- In case there are no TX buffers available, the function will block until
- one becomes available (i.e. until the remote processor consumes
- a tx buffer and puts it back on virtio's used descriptor ring),
- or a timeout of 15 seconds elapses. When the latter happens,
- -ERESTARTSYS is returned.
- The function can only be called from a process context (for now).
- Returns 0 on success and an appropriate error value on failure.
+
+sends a message across to the remote processor on a given channel,
+to a destination address provided by the caller.
+
+The caller should specify the channel, the data it wants to send,
+its length (in bytes), and an explicit destination address.
+
+The message will then be sent to the remote processor to which the
+channel belongs, using the channel's src address, and the user-provided
+dst address (thus the channel's dst address will be ignored).
+
+In case there are no TX buffers available, the function will block until
+one becomes available (i.e. until the remote processor consumes
+a tx buffer and puts it back on virtio's used descriptor ring),
+or a timeout of 15 seconds elapses. When the latter happens,
+-ERESTARTSYS is returned.
+
+The function can only be called from a process context (for now).
+Returns 0 on success and an appropriate error value on failure.
+
+::
int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
void *data, int len);
- - sends a message across to the remote processor, using the src and dst
- addresses provided by the user.
- The caller should specify the channel, the data it wants to send,
- its length (in bytes), and explicit source and destination addresses.
- The message will then be sent to the remote processor to which the
- channel belongs, but the channel's src and dst addresses will be
- ignored (and the user-provided addresses will be used instead).
-
- In case there are no TX buffers available, the function will block until
- one becomes available (i.e. until the remote processor consumes
- a tx buffer and puts it back on virtio's used descriptor ring),
- or a timeout of 15 seconds elapses. When the latter happens,
- -ERESTARTSYS is returned.
- The function can only be called from a process context (for now).
- Returns 0 on success and an appropriate error value on failure.
+
+
+sends a message across to the remote processor, using the src and dst
+addresses provided by the user.
+
+The caller should specify the channel, the data it wants to send,
+its length (in bytes), and explicit source and destination addresses.
+The message will then be sent to the remote processor to which the
+channel belongs, but the channel's src and dst addresses will be
+ignored (and the user-provided addresses will be used instead).
+
+In case there are no TX buffers available, the function will block until
+one becomes available (i.e. until the remote processor consumes
+a tx buffer and puts it back on virtio's used descriptor ring),
+or a timeout of 15 seconds elapses. When the latter happens,
+-ERESTARTSYS is returned.
+
+The function can only be called from a process context (for now).
+Returns 0 on success and an appropriate error value on failure.
+
+::
int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len);
- - sends a message across to the remote processor on a given channel.
- The caller should specify the channel, the data it wants to send,
- and its length (in bytes). The message will be sent on the specified
- channel, i.e. its source and destination address fields will be
- set to the channel's src and dst addresses.
- In case there are no TX buffers available, the function will immediately
- return -ENOMEM without waiting until one becomes available.
- The function can only be called from a process context (for now).
- Returns 0 on success and an appropriate error value on failure.
+sends a message across to the remote processor on a given channel.
+The caller should specify the channel, the data it wants to send,
+and its length (in bytes). The message will be sent on the specified
+channel, i.e. its source and destination address fields will be
+set to the channel's src and dst addresses.
+
+In case there are no TX buffers available, the function will immediately
+return -ENOMEM without waiting until one becomes available.
+
+The function can only be called from a process context (for now).
+Returns 0 on success and an appropriate error value on failure.
+
+::
int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
- - sends a message across to the remote processor on a given channel,
- to a destination address provided by the user.
- The user should specify the channel, the data it wants to send,
- its length (in bytes), and an explicit destination address.
- The message will then be sent to the remote processor to which the
- channel belongs, using the channel's src address, and the user-provided
- dst address (thus the channel's dst address will be ignored).
-
- In case there are no TX buffers available, the function will immediately
- return -ENOMEM without waiting until one becomes available.
- The function can only be called from a process context (for now).
- Returns 0 on success and an appropriate error value on failure.
+
+
+sends a message across to the remote processor on a given channel,
+to a destination address provided by the user.
+
+The user should specify the channel, the data it wants to send,
+its length (in bytes), and an explicit destination address.
+
+The message will then be sent to the remote processor to which the
+channel belongs, using the channel's src address, and the user-provided
+dst address (thus the channel's dst address will be ignored).
+
+In case there are no TX buffers available, the function will immediately
+return -ENOMEM without waiting until one becomes available.
+
+The function can only be called from a process context (for now).
+Returns 0 on success and an appropriate error value on failure.
+
+::
int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
void *data, int len);
- - sends a message across to the remote processor, using source and
- destination addresses provided by the user.
- The user should specify the channel, the data it wants to send,
- its length (in bytes), and explicit source and destination addresses.
- The message will then be sent to the remote processor to which the
- channel belongs, but the channel's src and dst addresses will be
- ignored (and the user-provided addresses will be used instead).
-
- In case there are no TX buffers available, the function will immediately
- return -ENOMEM without waiting until one becomes available.
- The function can only be called from a process context (for now).
- Returns 0 on success and an appropriate error value on failure.
+
+
+sends a message across to the remote processor, using source and
+destination addresses provided by the user.
+
+The user should specify the channel, the data it wants to send,
+its length (in bytes), and explicit source and destination addresses.
+The message will then be sent to the remote processor to which the
+channel belongs, but the channel's src and dst addresses will be
+ignored (and the user-provided addresses will be used instead).
+
+In case there are no TX buffers available, the function will immediately
+return -ENOMEM without waiting until one becomes available.
+
+The function can only be called from a process context (for now).
+Returns 0 on success and an appropriate error value on failure.
+
+::
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
void *priv, u32 addr);
- - every rpmsg address in the system is bound to an rx callback (so when
- inbound messages arrive, they are dispatched by the rpmsg bus using the
- appropriate callback handler) by means of an rpmsg_endpoint struct.
-
- This function allows drivers to create such an endpoint, and by that,
- bind a callback, and possibly some private data too, to an rpmsg address
- (either one that is known in advance, or one that will be dynamically
- assigned for them).
-
- Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
- is already created for them when they are probed by the rpmsg bus
- (using the rx callback they provide when they registered to the rpmsg bus).
-
- So things should just work for simple drivers: they already have an
- endpoint, their rx callback is bound to their rpmsg address, and when
- relevant inbound messages arrive (i.e. messages which their dst address
- equals to the src address of their rpmsg channel), the driver's handler
- is invoked to process it.
-
- That said, more complicated drivers might do need to allocate
- additional rpmsg addresses, and bind them to different rx callbacks.
- To accomplish that, those drivers need to call this function.
- Drivers should provide their channel (so the new endpoint would bind
- to the same remote processor their channel belongs to), an rx callback
- function, an optional private data (which is provided back when the
- rx callback is invoked), and an address they want to bind with the
- callback. If addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
- dynamically assign them an available rpmsg address (drivers should have
- a very good reason why not to always use RPMSG_ADDR_ANY here).
-
- Returns a pointer to the endpoint on success, or NULL on error.
+
+every rpmsg address in the system is bound to an rx callback (so when
+inbound messages arrive, they are dispatched by the rpmsg bus using the
+appropriate callback handler) by means of an rpmsg_endpoint struct.
+
+This function allows drivers to create such an endpoint, and by that,
+bind a callback, and possibly some private data too, to an rpmsg address
+(either one that is known in advance, or one that will be dynamically
+assigned for them).
+
+Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
+is already created for them when they are probed by the rpmsg bus
+(using the rx callback they provide when they registered to the rpmsg bus).
+
+So things should just work for simple drivers: they already have an
+endpoint, their rx callback is bound to their rpmsg address, and when
+relevant inbound messages arrive (i.e. messages whose dst address
+equals the src address of their rpmsg channel), the driver's handler
+is invoked to process them.
+
+That said, more complicated drivers might need to allocate
+additional rpmsg addresses, and bind them to different rx callbacks.
+To accomplish that, those drivers need to call this function.
+Drivers should provide their channel (so the new endpoint would bind
+to the same remote processor their channel belongs to), an rx callback
+function, an optional private data (which is provided back when the
+rx callback is invoked), and an address they want to bind with the
+callback. If addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
+dynamically assign them an available rpmsg address (drivers should have
+a very good reason not to always use RPMSG_ADDR_ANY here).
+
+Returns a pointer to the endpoint on success, or NULL on error.
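+
+A short sketch of binding an extra callback to a dynamically assigned
+address might look like this (names here are illustrative)::
+
+    static void my_ept_cb(struct rpmsg_channel *rpdev, void *data,
+                          int len, void *priv, u32 src)
+    {
+            /* handle messages addressed to the additional endpoint */
+    }
+
+    struct rpmsg_endpoint *ept;
+
+    ept = rpmsg_create_ept(rpdev, my_ept_cb, NULL, RPMSG_ADDR_ANY);
+    if (!ept)
+            dev_err(&rpdev->dev, "failed to create endpoint\n");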
+
+::
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
- - destroys an existing rpmsg endpoint. user should provide a pointer
- to an rpmsg endpoint that was previously created with rpmsg_create_ept().
+
+
+destroys an existing rpmsg endpoint. user should provide a pointer
+to an rpmsg endpoint that was previously created with rpmsg_create_ept().
+
+::
int register_rpmsg_driver(struct rpmsg_driver *rpdrv);
- - registers an rpmsg driver with the rpmsg bus. user should provide
- a pointer to an rpmsg_driver struct, which contains the driver's
- ->probe() and ->remove() functions, an rx callback, and an id_table
- specifying the names of the channels this driver is interested to
- be probed with.
+
+
+registers an rpmsg driver with the rpmsg bus. user should provide
+a pointer to an rpmsg_driver struct, which contains the driver's
+->probe() and ->remove() functions, an rx callback, and an id_table
+specifying the names of the channels this driver is interested in
+being probed with.
+
+::
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv);
- - unregisters an rpmsg driver from the rpmsg bus. user should provide
- a pointer to a previously-registered rpmsg_driver struct.
- Returns 0 on success, and an appropriate error value on failure.
-3. Typical usage
+unregisters an rpmsg driver from the rpmsg bus. user should provide
+a pointer to a previously-registered rpmsg_driver struct.
+Returns 0 on success, and an appropriate error value on failure.
+
+
+Typical usage
+=============
The following is a simple rpmsg driver that sends a "hello!" message
on probe() and, whenever it receives an incoming message, dumps its
content to the console.
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/rpmsg.h>
+::
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/rpmsg.h>
-static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
void *priv, u32 src)
-{
+ {
print_hex_dump(KERN_INFO, "incoming message:", DUMP_PREFIX_NONE,
16, 1, data, len, true);
-}
+ }
-static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
-{
+ static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
+ {
int err;
dev_info(&rpdev->dev, "chnl: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst);
@@ -234,32 +291,35 @@ static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
}
return 0;
-}
+ }
-static void rpmsg_sample_remove(struct rpmsg_channel *rpdev)
-{
+ static void rpmsg_sample_remove(struct rpmsg_channel *rpdev)
+ {
dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n");
-}
+ }
-static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
+ static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
{ .name = "rpmsg-client-sample" },
{ },
-};
-MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table);
+ };
+ MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table);
-static struct rpmsg_driver rpmsg_sample_client = {
+ static struct rpmsg_driver rpmsg_sample_client = {
.drv.name = KBUILD_MODNAME,
.id_table = rpmsg_driver_sample_id_table,
.probe = rpmsg_sample_probe,
.callback = rpmsg_sample_cb,
.remove = rpmsg_sample_remove,
-};
-module_rpmsg_driver(rpmsg_sample_client);
+ };
+ module_rpmsg_driver(rpmsg_sample_client);
+
+.. note::
-Note: a similar sample which can be built and loaded can be found
-in samples/rpmsg/.
+ a similar sample which can be built and loaded can be found
+ in samples/rpmsg/.
-4. Allocations of rpmsg channels:
+Allocations of rpmsg channels
+=============================
At this point we only support dynamic allocations of rpmsg channels.
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index ddc366026e00..c0c977445fb9 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -1,6 +1,6 @@
-
- Real Time Clock (RTC) Drivers for Linux
- =======================================
+=======================================
+Real Time Clock (RTC) Drivers for Linux
+=======================================
When Linux developers talk about a "Real Time Clock", they usually mean
something that tracks wall clock time and is battery backed so that it
@@ -32,8 +32,8 @@ only issue an alarm up to 24 hours in the future, other hardware may
be able to schedule one any time in the upcoming century.
- Old PC/AT-Compatible driver: /dev/rtc
- --------------------------------------
+Old PC/AT-Compatible driver: /dev/rtc
+--------------------------------------
All PCs (even Alpha machines) have a Real Time Clock built into them.
Usually they are built into the chipset of the computer, but some may
@@ -105,8 +105,8 @@ that will be using this driver. See the code at the end of this document.
(The original /dev/rtc driver was written by Paul Gortmaker.)
- New portable "RTC Class" drivers: /dev/rtcN
- --------------------------------------------
+New portable "RTC Class" drivers: /dev/rtcN
+--------------------------------------------
Because Linux supports many non-ACPI and non-PC platforms, some of which
have more than one RTC style clock, it needed a more portable solution
@@ -136,35 +136,39 @@ a high functionality RTC is integrated into the SOC. That system might read
the system clock from the discrete RTC, but use the integrated one for all
other tasks, because of its greater functionality.
-SYSFS INTERFACE
+SYSFS interface
---------------
The sysfs interface under /sys/class/rtc/rtcN provides access to various
rtc attributes without requiring the use of ioctls. All dates and times
are in the RTC's timezone, rather than in system time.
-date: RTC-provided date
-hctosys: 1 if the RTC provided the system time at boot via the
+================ ==============================================================
+date RTC-provided date
+hctosys 1 if the RTC provided the system time at boot via the
CONFIG_RTC_HCTOSYS kernel option, 0 otherwise
-max_user_freq: The maximum interrupt rate an unprivileged user may request
+max_user_freq The maximum interrupt rate an unprivileged user may request
from this RTC.
-name: The name of the RTC corresponding to this sysfs directory
-since_epoch: The number of seconds since the epoch according to the RTC
-time: RTC-provided time
-wakealarm: The time at which the clock will generate a system wakeup
+name The name of the RTC corresponding to this sysfs directory
+since_epoch The number of seconds since the epoch according to the RTC
+time RTC-provided time
+wakealarm The time at which the clock will generate a system wakeup
event. This is a one shot wakeup event, so must be reset
- after wake if a daily wakeup is required. Format is seconds since
- the epoch by default, or if there's a leading +, seconds in the
- future, or if there is a leading +=, seconds ahead of the current
- alarm.
-offset: The amount which the rtc clock has been adjusted in firmware.
+ after wake if a daily wakeup is required. Format is seconds
+ since the epoch by default, or if there's a leading +, seconds
+ in the future, or if there is a leading +=, seconds ahead of
+ the current alarm.
+offset The amount by which the rtc clock has been adjusted in firmware.
Visible only if the driver supports clock offset adjustment.
The unit is parts per billion, i.e. the number of clock ticks
which are added to or removed from the rtc's base clock per
billion ticks. A positive value makes a day pass more slowly
(longer), and a negative value makes a day pass more quickly.
+*/nvmem The non-volatile storage exported as a raw file, as described
+ in Documentation/nvmem/nvmem.txt
+================ ==============================================================
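+
+For instance, a userspace program might read one of these attributes
+as follows (an illustrative sketch)::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            char buf[64];
+            FILE *f = fopen("/sys/class/rtc/rtc0/since_epoch", "r");
+
+            if (!f)
+                    return 1;
+            if (fgets(buf, sizeof(buf), f))
+                    printf("seconds since epoch: %s", buf);
+            fclose(f);
+            return 0;
+    }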
-IOCTL INTERFACE
+IOCTL interface
---------------
The ioctl() calls supported by /dev/rtc are also supported by the RTC class
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index 0d831a7afe4f..1648fa80b3bf 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -894,6 +894,12 @@ The keyctl syscall functions are:
To apply a keyring restriction the process must have Set Attribute
permission and the keyring must not be previously restricted.
+ One application of restricted keyrings is to verify X.509 certificate
+ chains or individual certificate signatures using the asymmetric key type.
+ See Documentation/crypto/asymmetric-keys.txt for specific restrictions
+ applicable to the asymmetric key type.
+
+
Kernel Services
===============
diff --git a/Documentation/sgi-ioc4.txt b/Documentation/sgi-ioc4.txt
index 876c96ae38db..72709222d3c0 100644
--- a/Documentation/sgi-ioc4.txt
+++ b/Documentation/sgi-ioc4.txt
@@ -1,3 +1,7 @@
+====================================
+SGI IOC4 PCI (multi function) device
+====================================
+
The SGI IOC4 PCI device is a bit of a strange beast, so some notes on
it are in order.
diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt
index 908d348ff777..9965821ab333 100644
--- a/Documentation/siphash.txt
+++ b/Documentation/siphash.txt
@@ -1,6 +1,8 @@
- SipHash - a short input PRF
------------------------------------------------
-Written by Jason A. Donenfeld <jason@zx2c4.com>
+===========================
+SipHash - a short input PRF
+===========================
+
+:Author: Written by Jason A. Donenfeld <jason@zx2c4.com>
SipHash is a cryptographically secure PRF -- a keyed hash function -- that
performs very well for short inputs, hence the name. It was designed by
@@ -13,58 +15,61 @@ an input buffer or several input integers. It spits out an integer that is
indistinguishable from random. You may then use that integer as part of secure
sequence numbers, secure cookies, or mask it off for use in a hash table.
-1. Generating a key
+Generating a key
+================
Keys should always be generated from a cryptographically secure source of
-random numbers, either using get_random_bytes or get_random_once:
+random numbers, either using get_random_bytes or get_random_once::
-siphash_key_t key;
-get_random_bytes(&key, sizeof(key));
+ siphash_key_t key;
+ get_random_bytes(&key, sizeof(key));
If you're not deriving your key from here, you're doing it wrong.
-2. Using the functions
+Using the functions
+===================
There are two variants of the function, one that takes a list of integers, and
-one that takes a buffer:
+one that takes a buffer::
-u64 siphash(const void *data, size_t len, const siphash_key_t *key);
+ u64 siphash(const void *data, size_t len, const siphash_key_t *key);
-And:
+And::
-u64 siphash_1u64(u64, const siphash_key_t *key);
-u64 siphash_2u64(u64, u64, const siphash_key_t *key);
-u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
-u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
-u64 siphash_1u32(u32, const siphash_key_t *key);
-u64 siphash_2u32(u32, u32, const siphash_key_t *key);
-u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
-u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
+ u64 siphash_1u64(u64, const siphash_key_t *key);
+ u64 siphash_2u64(u64, u64, const siphash_key_t *key);
+ u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
+ u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
+ u64 siphash_1u32(u32, const siphash_key_t *key);
+ u64 siphash_2u32(u32, u32, const siphash_key_t *key);
+ u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
+ u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
If you pass the generic siphash function something of a constant length, it
will constant fold at compile-time and automatically choose one of the
optimized functions.
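+
+For example (a minimal sketch, with arbitrary example inputs), hashing
+a pair of integers might look like::
+
+    siphash_key_t key;
+    u64 first = 1, second = 2;      /* arbitrary example inputs */
+    u64 hash;
+
+    get_random_once(&key, sizeof(key));
+    hash = siphash_2u64(first, second, &key);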
-3. Hashtable key function usage:
+Hashtable key function usage
+============================
+
+::
-struct some_hashtable {
- DECLARE_HASHTABLE(hashtable, 8);
- siphash_key_t key;
-};
+ struct some_hashtable {
+ DECLARE_HASHTABLE(hashtable, 8);
+ siphash_key_t key;
+ };
-void init_hashtable(struct some_hashtable *table)
-{
- get_random_bytes(&table->key, sizeof(table->key));
-}
+ void init_hashtable(struct some_hashtable *table)
+ {
+ get_random_bytes(&table->key, sizeof(table->key));
+ }
-static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
-{
- return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
-}
+ static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+ {
+ return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+ }
You may then iterate like usual over the returned hash bucket.
-4. Security
+Security
+========
SipHash has a very high security margin, with its 128-bit key. So long as the
key is kept secret, it is impossible for an attacker to guess the outputs of
@@ -73,7 +78,8 @@ is significant.
Linux implements the "2-4" variant of SipHash.
-5. Struct-passing Pitfalls
+Struct-passing Pitfalls
+=======================
Oftentimes the XuY functions will not be large enough, and instead you'll
want to pass a pre-filled struct to siphash. When doing this, it's important
@@ -81,30 +87,32 @@ to always ensure the struct has no padding holes. The easiest way to do this
is to simply arrange the members of the struct in descending order of size,
and to use offsetofend() instead of sizeof() for getting the size. For
performance reasons, if possible, it's probably a good thing to align the
-struct to the right boundary. Here's an example:
-
-const struct {
- struct in6_addr saddr;
- u32 counter;
- u16 dport;
-} __aligned(SIPHASH_ALIGNMENT) combined = {
- .saddr = *(struct in6_addr *)saddr,
- .counter = counter,
- .dport = dport
-};
-u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
-
-6. Resources
+struct to the right boundary. Here's an example::
+
+ const struct {
+ struct in6_addr saddr;
+ u32 counter;
+ u16 dport;
+ } __aligned(SIPHASH_ALIGNMENT) combined = {
+ .saddr = *(struct in6_addr *)saddr,
+ .counter = counter,
+ .dport = dport
+ };
+ u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
+
+Resources
+=========
Read the SipHash paper if you're interested in learning more:
https://131002.net/siphash/siphash.pdf
+-------------------------------------------------------------------------------
-~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
-
+===============================================
HalfSipHash - SipHash's insecure younger cousin
------------------------------------------------
-Written by Jason A. Donenfeld <jason@zx2c4.com>
+===============================================
+
+:Author: Written by Jason A. Donenfeld <jason@zx2c4.com>
On the off-chance that SipHash is not fast enough for your needs, you might be
able to justify using HalfSipHash, a terrifying but potentially useful
@@ -120,7 +128,8 @@ then when you can be absolutely certain that the outputs will never be
transmitted out of the kernel. This is only remotely useful over `jhash` as a
means of mitigating hashtable flooding denial of service attacks.
-1. Generating a key
+Generating a key
+================
Keys should always be generated from a cryptographically secure source of
random numbers, either using get_random_bytes or get_random_once:
@@ -130,44 +139,49 @@ get_random_bytes(&key, sizeof(key));
If you're not deriving your key from here, you're doing it wrong.
-2. Using the functions
+Using the functions
+===================
There are two variants of the function, one that takes a list of integers, and
-one that takes a buffer:
+one that takes a buffer::
-u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
+ u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
-And:
+And::
-u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
-u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
-u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
-u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
+ u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
+ u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
+ u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
+ u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
If you pass the generic hsiphash function something of a constant length, it
will constant fold at compile-time and automatically choose one of the
optimized functions.
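+
+For example (a minimal sketch, with arbitrary example inputs)::
+
+    hsiphash_key_t key;
+    u32 first = 1, second = 2;      /* arbitrary example inputs */
+    u32 hash;
+
+    get_random_bytes(&key, sizeof(key));
+    hash = hsiphash_2u32(first, second, &key);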
-3. Hashtable key function usage:
+Hashtable key function usage
+============================
+
+::
-struct some_hashtable {
- DECLARE_HASHTABLE(hashtable, 8);
- hsiphash_key_t key;
-};
+ struct some_hashtable {
+ DECLARE_HASHTABLE(hashtable, 8);
+ hsiphash_key_t key;
+ };
-void init_hashtable(struct some_hashtable *table)
-{
- get_random_bytes(&table->key, sizeof(table->key));
-}
+ void init_hashtable(struct some_hashtable *table)
+ {
+ get_random_bytes(&table->key, sizeof(table->key));
+ }
-static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
-{
- return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
-}
+ static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+ {
+ return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+ }
You may then iterate like usual over the returned hash bucket.
-4. Performance
+Performance
+===========
HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
this will not be a problem, as the hashtable lookup isn't the bottleneck. And
diff --git a/Documentation/smsc_ece1099.txt b/Documentation/smsc_ece1099.txt
index 6b492e82b43d..079277421eaf 100644
--- a/Documentation/smsc_ece1099.txt
+++ b/Documentation/smsc_ece1099.txt
@@ -1,3 +1,7 @@
+==================================================
+SMSC Keyboard Scan Expansion/GPIO Expansion device
+==================================================
+
What is smsc-ece1099?
----------------------
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index ef419fd0897f..b83dfa1c0602 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -1,30 +1,34 @@
- Static Keys
- -----------
+===========
+Static Keys
+===========
-DEPRECATED API:
+.. warning::
-The use of 'struct static_key' directly, is now DEPRECATED. In addition
-static_key_{true,false}() is also DEPRECATED. IE DO NOT use the following:
+ DEPRECATED API:
-struct static_key false = STATIC_KEY_INIT_FALSE;
-struct static_key true = STATIC_KEY_INIT_TRUE;
-static_key_true()
-static_key_false()
+ The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ static_key_{true,false}() is also DEPRECATED. I.e., DO NOT use the following::
-The updated API replacements are:
+ struct static_key false = STATIC_KEY_INIT_FALSE;
+ struct static_key true = STATIC_KEY_INIT_TRUE;
+ static_key_true()
+ static_key_false()
-DEFINE_STATIC_KEY_TRUE(key);
-DEFINE_STATIC_KEY_FALSE(key);
-DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
-DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
-static_branch_likely()
-static_branch_unlikely()
+ The updated API replacements are::
-0) Abstract
+ DEFINE_STATIC_KEY_TRUE(key);
+ DEFINE_STATIC_KEY_FALSE(key);
+ DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+ DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
+ static_branch_likely()
+ static_branch_unlikely()
+
+Abstract
+========
Static keys allow the inclusion of seldom-used features in
performance-sensitive fast-path kernel code, via a GCC feature and a code
-patching technique. A quick example:
+patching technique. A quick example::
DEFINE_STATIC_KEY_FALSE(key);
@@ -45,7 +49,8 @@ The static_branch_unlikely() branch will be generated into the code with as litt
impact to the likely code path as possible.
-1) Motivation
+Motivation
+==========
Currently, tracepoints are implemented using a conditional branch. The
@@ -60,7 +65,8 @@ possible. Although tracepoints are the original motivation for this work, other
kernel code paths should be able to make use of the static keys facility.
-2) Solution
+Solution
+========
gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label:
@@ -71,7 +77,7 @@ Using the 'asm goto', we can create branches that are either taken or not taken
by default, without the need to check memory. Then, at run-time, we can patch
the branch site to change the branch direction.
-For example, if we have a simple branch that is disabled by default:
+For example, if we have a simple branch that is disabled by default::
if (static_branch_unlikely(&key))
printk("I am the true branch\n");
@@ -87,14 +93,15 @@ optimization.
This lowlevel patching mechanism is called 'jump label patching', and it gives
the basis for the static keys facility.
-3) Static key label API, usage and examples:
+Static key label API, usage and examples
+========================================
-In order to make use of this optimization you must first define a key:
+In order to make use of this optimization you must first define a key::
DEFINE_STATIC_KEY_TRUE(key);
-or:
+or::
DEFINE_STATIC_KEY_FALSE(key);
@@ -102,14 +109,14 @@ or:
The key must be global, that is, it can't be allocated on the stack or dynamically
allocated at run-time.
-The key is then used in code as:
+The key is then used in code as::
if (static_branch_unlikely(&key))
do unlikely code
else
do likely code
-Or:
+Or::
if (static_branch_likely(&key))
do likely code
@@ -120,15 +127,15 @@ Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE, may
be used in either static_branch_likely() or static_branch_unlikely()
statements.
-Branch(es) can be set true via:
+Branch(es) can be set true via::
-static_branch_enable(&key);
+ static_branch_enable(&key);
-or false via:
+or false via::
-static_branch_disable(&key);
+ static_branch_disable(&key);
-The branch(es) can then be switched via reference counts:
+The branch(es) can then be switched via reference counts::
static_branch_inc(&key);
...
@@ -142,11 +149,11 @@ static_branch_inc(), will change the branch back to true. Likewise, if the
key is initialized false, a 'static_branch_inc()', will change the branch to
true. And then a 'static_branch_dec()', will again make the branch false.
-Where an array of keys is required, it can be defined as:
+Where an array of keys is required, it can be defined as::
DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
-or:
+or::
DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
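+
+An individual key in such an array is then used just like a standalone
+key (an illustrative sketch)::
+
+    if (static_branch_likely(&keys[i]))
+            do likely code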
@@ -159,96 +166,98 @@ simply fall back to a traditional, load, test, and jump sequence. Also, the
struct jump_entry table must be at least 4-byte aligned because the
static_key->entry field makes use of the two least significant bits.
-* select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
-
-* #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h
+* ``select HAVE_ARCH_JUMP_LABEL``,
+ see: arch/x86/Kconfig
-* __always_inline bool arch_static_branch(struct static_key *key, bool branch), see:
- arch/x86/include/asm/jump_label.h
+* ``#define JUMP_LABEL_NOP_SIZE``,
+ see: arch/x86/include/asm/jump_label.h
-* __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch),
- see: arch/x86/include/asm/jump_label.h
+* ``__always_inline bool arch_static_branch(struct static_key *key, bool branch)``,
+ see: arch/x86/include/asm/jump_label.h
-* void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type),
- see: arch/x86/kernel/jump_label.c
+* ``__always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)``,
+ see: arch/x86/include/asm/jump_label.h
-* __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type),
- see: arch/x86/kernel/jump_label.c
+* ``void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)``,
+ see: arch/x86/kernel/jump_label.c
+* ``__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)``,
+ see: arch/x86/kernel/jump_label.c
-* struct jump_entry, see: arch/x86/include/asm/jump_label.h
+* ``struct jump_entry``,
+ see: arch/x86/include/asm/jump_label.h
-5) Static keys / jump label analysis, results (x86_64):
+Static keys / jump label analysis, results (x86_64)
+====================================================
As an example, let's add the following branch to 'getppid()', such that the
-system call now looks like:
+system call now looks like::
-SYSCALL_DEFINE0(getppid)
-{
+ SYSCALL_DEFINE0(getppid)
+ {
int pid;
-+ if (static_branch_unlikely(&key))
-+ printk("I am the true branch\n");
+ + if (static_branch_unlikely(&key))
+ + printk("I am the true branch\n");
rcu_read_lock();
pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
-}
-
-The resulting instructions with jump labels generated by GCC is:
-
-ffffffff81044290 <sys_getppid>:
-ffffffff81044290: 55 push %rbp
-ffffffff81044291: 48 89 e5 mov %rsp,%rbp
-ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 <sys_getppid+0x9>
-ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax
-ffffffff810442a0: 00 00
-ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax
-ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax
-ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi
-ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 <pid_vnr>
-ffffffff810442bc: 5d pop %rbp
-ffffffff810442bd: 48 98 cltq
-ffffffff810442bf: c3 retq
-ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi
-ffffffff810442c7: 31 c0 xor %eax,%eax
-ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f <printk>
-ffffffff810442ce: eb c9 jmp ffffffff81044299 <sys_getppid+0x9>
-
-Without the jump label optimization it looks like:
-
-ffffffff810441f0 <sys_getppid>:
-ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 <key>
-ffffffff810441f6: 55 push %rbp
-ffffffff810441f7: 48 89 e5 mov %rsp,%rbp
-ffffffff810441fa: 85 c0 test %eax,%eax
-ffffffff810441fc: 75 27 jne ffffffff81044225 <sys_getppid+0x35>
-ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax
-ffffffff81044205: 00 00
-ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax
-ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax
-ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi
-ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 <pid_vnr>
-ffffffff81044221: 5d pop %rbp
-ffffffff81044222: 48 98 cltq
-ffffffff81044224: c3 retq
-ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi
-ffffffff8104422c: 31 c0 xor %eax,%eax
-ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 <printk>
-ffffffff81044233: eb c9 jmp ffffffff810441fe <sys_getppid+0xe>
-ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1)
-ffffffff8104423c: 00 00 00 00
+ }
+
+The resulting instructions with jump labels generated by GCC is::
+
+ ffffffff81044290 <sys_getppid>:
+ ffffffff81044290: 55 push %rbp
+ ffffffff81044291: 48 89 e5 mov %rsp,%rbp
+ ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 <sys_getppid+0x9>
+ ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax
+ ffffffff810442a0: 00 00
+ ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax
+ ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax
+ ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi
+ ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 <pid_vnr>
+ ffffffff810442bc: 5d pop %rbp
+ ffffffff810442bd: 48 98 cltq
+ ffffffff810442bf: c3 retq
+ ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi
+ ffffffff810442c7: 31 c0 xor %eax,%eax
+ ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f <printk>
+ ffffffff810442ce: eb c9 jmp ffffffff81044299 <sys_getppid+0x9>
+
+Without the jump label optimization it looks like::
+
+ ffffffff810441f0 <sys_getppid>:
+ ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 <key>
+ ffffffff810441f6: 55 push %rbp
+ ffffffff810441f7: 48 89 e5 mov %rsp,%rbp
+ ffffffff810441fa: 85 c0 test %eax,%eax
+ ffffffff810441fc: 75 27 jne ffffffff81044225 <sys_getppid+0x35>
+ ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax
+ ffffffff81044205: 00 00
+ ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax
+ ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax
+ ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi
+ ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 <pid_vnr>
+ ffffffff81044221: 5d pop %rbp
+ ffffffff81044222: 48 98 cltq
+ ffffffff81044224: c3 retq
+ ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi
+ ffffffff8104422c: 31 c0 xor %eax,%eax
+ ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 <printk>
+ ffffffff81044233: eb c9 jmp ffffffff810441fe <sys_getppid+0xe>
+ ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1)
+ ffffffff8104423c: 00 00 00 00
Thus, the disabled jump label case adds a 'mov', 'test' and 'jne' instruction,
whereas the jump label case just has a 'no-op' or 'jmp 0'. (The 'jmp 0' is
patched to a 5 byte atomic no-op instruction at boot-time.) Thus, the disabled jump
-label case adds:
+label case adds::
-6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 addition bytes.
+ 6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 additional bytes.
If we then include the padding bytes, the jump label code saves 16 total bytes
of instruction memory for this small function. In this case the non-jump label
@@ -262,7 +271,7 @@ Since there are a number of static key API uses in the scheduler paths,
'pipe-test' (also known as 'perf bench sched pipe') can be used to show the
performance improvement. Testing done on 3.3.0-rc2:
-jump label disabled:
+jump label disabled::
Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs):
@@ -279,7 +288,7 @@ jump label disabled:
1.601607384 seconds time elapsed ( +- 0.07% )
-jump label enabled:
+jump label enabled::
Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs):
diff --git a/Documentation/svga.txt b/Documentation/svga.txt
index cd66ec836e4f..119f1515b1ac 100644
--- a/Documentation/svga.txt
+++ b/Documentation/svga.txt
@@ -1,24 +1,31 @@
- Video Mode Selection Support 2.13
- (c) 1995--1999 Martin Mares, <mj@ucw.cz>
---------------------------------------------------------------------------------
+.. include:: <isonum.txt>
-1. Intro
-~~~~~~~~
- This small document describes the "Video Mode Selection" feature which
+=================================
+Video Mode Selection Support 2.13
+=================================
+
+:Copyright: |copy| 1995--1999 Martin Mares, <mj@ucw.cz>
+
+Intro
+~~~~~
+
+This small document describes the "Video Mode Selection" feature which
allows the use of various special video modes supported by the video BIOS. Due
to usage of the BIOS, the selection is limited to boot time (before the
kernel decompression starts) and works only on 80X86 machines.
- ** Short intro for the impatient: Just use vga=ask for the first time,
- ** enter `scan' on the video mode prompt, pick the mode you want to use,
- ** remember its mode ID (the four-digit hexadecimal number) and then
- ** set the vga parameter to this number (converted to decimal first).
+.. note::
- The video mode to be used is selected by a kernel parameter which can be
+ Short intro for the impatient: Just use vga=ask for the first time,
+ enter ``scan`` on the video mode prompt, pick the mode you want to use,
+ remember its mode ID (the four-digit hexadecimal number) and then
+ set the vga parameter to this number (converted to decimal first).
+
+The video mode to be used is selected by a kernel parameter which can be
specified in the kernel Makefile (the SVGA_MODE=... line) or by the "vga=..."
option of LILO (or some other boot loader you use) or by the "vidmode" utility
(present in standard Linux utility packages). You can use the following values
-of this parameter:
+of this parameter::
NORMAL_VGA - Standard 80x25 mode available on all display adapters.
@@ -37,77 +44,79 @@ of this parameter:
for exact meaning of the ID). Warning: rdev and LILO don't support
hexadecimal numbers -- you have to convert it to decimal manually.
-2. Menu
-~~~~~~~
- The ASK_VGA mode causes the kernel to offer a video mode menu upon
+Menu
+~~~~
+
+The ASK_VGA mode causes the kernel to offer a video mode menu upon
bootup. It displays a "Press <RETURN> to see video modes available, <SPACE>
to continue or wait 30 secs" message. If you press <RETURN>, you enter the
menu, if you press <SPACE> or wait 30 seconds, the kernel will boot up in
the standard 80x25 mode.
- The menu looks like:
+The menu looks like::
-Video adapter: <name-of-detected-video-adapter>
-Mode: COLSxROWS:
-0 0F00 80x25
-1 0F01 80x50
-2 0F02 80x43
-3 0F03 80x26
-....
-Enter mode number or `scan': <flashing-cursor-here>
+ Video adapter: <name-of-detected-video-adapter>
+ Mode: COLSxROWS:
+ 0 0F00 80x25
+ 1 0F01 80x50
+ 2 0F02 80x43
+ 3 0F03 80x26
+ ....
+ Enter mode number or 'scan': <flashing-cursor-here>
- <name-of-detected-video-adapter> tells what video adapter did Linux detect
+<name-of-detected-video-adapter> tells which video adapter Linux detected
-- it's either a generic adapter name (MDA, CGA, HGC, EGA, VGA, VESA VGA [a VGA
with VESA-compliant BIOS]) or a chipset name (e.g., Trident). Direct detection
of chipsets is turned off by default (see CONFIG_VIDEO_SVGA in the Options section to see
how to enable it if you really want) as it's inherently unreliable due to
absolutely insane PC design.
- "0 0F00 80x25" means that the first menu item (the menu items are numbered
+"0 0F00 80x25" means that the first menu item (the menu items are numbered
from "0" to "9" and from "a" to "z") is a 80x25 mode with ID=0x0f00 (see the
next section for a description of mode IDs).
- <flashing-cursor-here> encourages you to enter the item number or mode ID
+<flashing-cursor-here> encourages you to enter the item number or mode ID
you wish to set and press <RETURN>. If the computer complains something about
"Unknown mode ID", it is trying to tell you that it isn't possible to set such
a mode. It's also possible to press only <RETURN> which leaves the current mode.
- The mode list usually contains a few basic modes and some VESA modes. In
+The mode list usually contains a few basic modes and some VESA modes. In
case your chipset has been detected, some chipset-specific modes are shown as
well (some of these might be missing or unusable on your machine as different
BIOSes are often shipped with the same card and the mode numbers depend purely
on the VGA BIOS).
- The modes displayed on the menu are partially sorted: The list starts with
+The modes displayed on the menu are partially sorted: The list starts with
the standard modes (80x25 and 80x50) followed by "special" modes (80x28 and
80x43), local modes (if the local modes feature is enabled), VESA modes and
finally SVGA modes for the auto-detected adapter.
- If you are not happy with the mode list offered (e.g., if you think your card
+If you are not happy with the mode list offered (e.g., if you think your card
is able to do more), you can enter "scan" instead of item number / mode ID. The
program will try to ask the BIOS for all possible video mode numbers and test
what happens then. The screen will probably flash wildly for some time and
strange noises will be heard from inside the monitor and so on and then, really
all consistent video modes supported by your BIOS will appear (plus maybe some
-`ghost modes'). If you are afraid this could damage your monitor, don't use this
-function.
+``ghost modes``). If you are afraid this could damage your monitor, don't use
+this function.
- After scanning, the mode ordering is a bit different: the auto-detected SVGA
-modes are not listed at all and the modes revealed by `scan' are shown before
+After scanning, the mode ordering is a bit different: the auto-detected SVGA
+modes are not listed at all and the modes revealed by ``scan`` are shown before
all VESA modes.
-3. Mode IDs
-~~~~~~~~~~~
- Because of the complexity of all the video stuff, the video mode IDs
+Mode IDs
+~~~~~~~~
+
+Because of the complexity of all the video stuff, the video mode IDs
used here are also a bit complex. A video mode ID is a 16-bit number usually
expressed in a hexadecimal notation (starting with "0x"). You can set a mode
by entering its mode directly if you know it even if it isn't shown on the menu.
-The ID numbers can be divided to three regions:
+The ID numbers can be divided into three regions::
0x0000 to 0x00ff - menu item references. 0x0000 is the first item. Don't use
outside the menu as this can change from boot to boot (especially if you
- have used the `scan' feature).
+ have used the 'scan' feature).
0x0100 to 0x017f - standard BIOS modes. The ID is a BIOS video mode number
(as presented to INT 10, function 00) increased by 0x0100.
@@ -142,53 +151,54 @@ The ID numbers can be divided to three regions:
0xffff equivalent to 0x0f00 (standard 80x25)
0xfffe equivalent to 0x0f01 (EGA 80x43 or VGA 80x50)
- If you add 0x8000 to the mode ID, the program will try to recalculate
+If you add 0x8000 to the mode ID, the program will try to recalculate
vertical display timing according to mode parameters, which can be used to
eliminate some annoying bugs of certain VGA BIOSes (usually those used for
cards with S3 chipsets and old Cirrus Logic BIOSes) -- mainly extra lines at the
end of the display.
-4. Options
-~~~~~~~~~~
- Some options can be set in the source text (in arch/i386/boot/video.S).
+Options
+~~~~~~~
+
+Some options can be set in the source text (in arch/i386/boot/video.S).
All of them are simple #define's -- change them to #undef's when you want to
switch them off. Currently supported:
- CONFIG_VIDEO_SVGA - enables autodetection of SVGA cards. This is switched
+CONFIG_VIDEO_SVGA - enables autodetection of SVGA cards. This is switched
off by default as it's a bit unreliable due to terribly bad PC design. If you
-really want to have the adapter autodetected (maybe in case the `scan' feature
+really want to have the adapter autodetected (maybe in case the ``scan`` feature
doesn't work on your machine), switch this on and don't cry if the results
are not completely sane. In case you really need this feature, please drop me
a mail as I think of removing it some day.
- CONFIG_VIDEO_VESA - enables autodetection of VESA modes. If it doesn't work
+CONFIG_VIDEO_VESA - enables autodetection of VESA modes. If it doesn't work
on your machine (or displays an "Error: Scanning of VESA modes failed" message),
you can switch it off and report it as a bug.
- CONFIG_VIDEO_COMPACT - enables compacting of the video mode list. If there
+CONFIG_VIDEO_COMPACT - enables compacting of the video mode list. If there
are more modes with the same screen size, only the first one is kept (see above
for more info on mode ordering). However, in very strange cases it's possible
that the first "version" of the mode doesn't work although some of the others
do -- in this case turn this switch off to see the rest.
- CONFIG_VIDEO_RETAIN - enables retaining of screen contents when switching
+CONFIG_VIDEO_RETAIN - enables retaining of screen contents when switching
video modes. Works only with some boot loaders which leave enough room for the
buffer. (If you have old LILO, you can adjust heap_end_ptr and loadflags
in setup.S, but it's better to upgrade the boot loader...)
- CONFIG_VIDEO_LOCAL - enables inclusion of "local modes" in the list. The
+CONFIG_VIDEO_LOCAL - enables inclusion of "local modes" in the list. The
local modes are added automatically to the beginning of the list, regardless
of hardware configuration. The local modes are listed in the source text after
the "local_mode_table:" line. The comment before this line describes the format
of the table (which also includes a video card name to be displayed on the
top of the menu).
- CONFIG_VIDEO_400_HACK - force setting of 400 scan lines for standard VGA
+CONFIG_VIDEO_400_HACK - force setting of 400 scan lines for standard VGA
modes. This option is intended to be used on certain buggy BIOSes which draw
some useless logo using font download and then fail to reset the correct mode.
Don't use unless needed as it forces resetting the video card.
- CONFIG_VIDEO_GFX_HACK - includes special hack for setting of graphics modes
+CONFIG_VIDEO_GFX_HACK - includes special hack for setting of graphics modes
to be used later by special drivers (e.g., 800x600 on IBM ThinkPad -- see
ftp://ftp.phys.keio.ac.jp/pub/XFree86/800x600/XF86Configs/XF86Config.IBM_TP560).
Allows setting _any_ BIOS mode, including graphics ones, and forcing a specific
@@ -196,33 +206,36 @@ text screen resolution instead of peeking it from BIOS variables. Don't use
unless you think you know what you're doing. To activate this setup, use
mode number 0x0f08 (see the "Mode IDs" section above).
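+
+For example, to switch off the VESA mode autodetection described above, you
+would change the line in video.S reading::
+
+	#define CONFIG_VIDEO_VESA
+
+into::
+
+	#undef CONFIG_VIDEO_VESA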
-5. Still doesn't work?
-~~~~~~~~~~~~~~~~~~~~~~
- When the mode detection doesn't work (e.g., the mode list is incorrect or
+Still doesn't work?
+~~~~~~~~~~~~~~~~~~~
+
+When the mode detection doesn't work (e.g., the mode list is incorrect or
the machine hangs instead of displaying the menu), try to switch off some of
the configuration options listed in the "Options" section above. If that
fails, you can still use your kernel with the video mode set directly via
the kernel parameter.
- In either case, please send me a bug report containing what _exactly_
+In either case, please send me a bug report containing what _exactly_
happens and how the configuration switches affect the behaviour of the bug.
- If you start Linux from M$-DOS, you might also use some DOS tools for
+If you start Linux from M$-DOS, you might also use some DOS tools for
video mode setting. In this case, you must specify the 0x0f04 mode ("leave
current settings") to Linux, because if you don't and you use any non-standard
mode, Linux will switch to 80x25 automatically.
- If you set some extended mode and there's one or more extra lines on the
+If you set some extended mode and there's one or more extra lines on the
bottom of the display containing already scrolled-out text, your VGA BIOS
contains the most common video BIOS bug called "incorrect vertical display
end setting". Adding 0x8000 to the mode ID might fix the problem. Unfortunately,
this must be done manually -- no autodetection mechanisms are available.
- If you have a VGA card and your display still looks as on EGA, your BIOS
+If you have a VGA card and your display still looks like EGA, your BIOS
is probably broken and you need to set the CONFIG_VIDEO_400_HACK switch to
force setting of the correct mode.
-6. History
-~~~~~~~~~~
+History
+~~~~~~~
+
+=============== ================================================================
1.0 (??-Nov-95) First version supporting all adapters supported by the old
setup.S + Cirrus Logic 54XX. Present in some 1.3.4? kernels
and then removed due to instability on some machines.
@@ -260,17 +273,18 @@ force setting of the correct mode.
original version written by hhanemaa@cs.ruu.nl, patched by
Jeff Chua, rewritten by me).
- Screen store/restore fixed.
-2.8 (14-Apr-96) - Previous release was not compilable without CONFIG_VIDEO_SVGA.
+2.8 (14-Apr-96) - Previous release was not compilable without CONFIG_VIDEO_SVGA.
- Better recognition of text modes during mode scan.
2.9 (12-May-96) - Ignored VESA modes 0x80 - 0xff (more VESA BIOS bugs!)
-2.10 (11-Nov-96)- The whole thing made optional.
+2.10(11-Nov-96) - The whole thing made optional.
- Added the CONFIG_VIDEO_400_HACK switch.
- Added the CONFIG_VIDEO_GFX_HACK switch.
- Code cleanup.
-2.11 (03-May-97)- Yet another cleanup, now including also the documentation.
- - Direct testing of SVGA adapters turned off by default, `scan'
+2.11(03-May-97) - Yet another cleanup, now including also the documentation.
+ - Direct testing of SVGA adapters turned off by default, ``scan``
offered explicitly on the prompt line.
- Removed the doc section describing adding of new probing
functions as I try to get rid of _all_ hardware probing here.
-2.12 (25-May-98)- Added support for VESA frame buffer graphics.
-2.13 (14-May-99)- Minor documentation fixes.
+2.12(25-May-98) - Added support for VESA frame buffer graphics.
+2.13(14-May-99) - Minor documentation fixes.
+=============== ================================================================
diff --git a/Documentation/tee.txt b/Documentation/tee.txt
index 718599357596..56ea85ffebf2 100644
--- a/Documentation/tee.txt
+++ b/Documentation/tee.txt
@@ -1,4 +1,7 @@
+=============
TEE subsystem
+=============
+
This document describes the TEE subsystem in Linux.
A TEE (Trusted Execution Environment) is a trusted OS running in some
@@ -80,27 +83,27 @@ The GlobalPlatform TEE Client API [5] is implemented on top of the generic
TEE API.
Picture of the relationship between the different components in the
-OP-TEE architecture.
-
- User space Kernel Secure world
- ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~
- +--------+ +-------------+
- | Client | | Trusted |
- +--------+ | Application |
- /\ +-------------+
- || +----------+ /\
- || |tee- | ||
- || |supplicant| \/
- || +----------+ +-------------+
- \/ /\ | TEE Internal|
- +-------+ || | API |
- + TEE | || +--------+--------+ +-------------+
- | Client| || | TEE | OP-TEE | | OP-TEE |
- | API | \/ | subsys | driver | | Trusted OS |
- +-------+----------------+----+-------+----+-----------+-------------+
- | Generic TEE API | | OP-TEE MSG |
- | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) |
- +-----------------------------+ +------------------------------+
+OP-TEE architecture::
+
+ User space Kernel Secure world
+ ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~
+ +--------+ +-------------+
+ | Client | | Trusted |
+ +--------+ | Application |
+ /\ +-------------+
+ || +----------+ /\
+ || |tee- | ||
+ || |supplicant| \/
+ || +----------+ +-------------+
+ \/ /\ | TEE Internal|
+ +-------+ || | API |
+ + TEE | || +--------+--------+ +-------------+
+ | Client| || | TEE | OP-TEE | | OP-TEE |
+ | API | \/ | subsys | driver | | Trusted OS |
+ +-------+----------------+----+-------+----+-----------+-------------+
+ | Generic TEE API | | OP-TEE MSG |
+ | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) |
+ +-----------------------------+ +------------------------------+
RPCs (Remote Procedure Calls) are requests from secure world to kernel driver
or tee-supplicant. An RPC is identified by a special range of SMCCC return
@@ -109,10 +112,16 @@ kernel are handled by the kernel driver. Other RPC messages will be forwarded to
tee-supplicant without further involvement of the driver, except switching
shared memory buffer representation.
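+
+As an illustration of the generic ioctl interface, here is a minimal user
+space sketch (the <linux/tee.h> UAPI header, a /dev/tee0 device node and the
+TEE_IOC_VERSION query are assumptions here; error handling is trimmed)::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/tee.h>
+
+    int main(void)
+    {
+        struct tee_ioctl_version_data vers;
+        int fd = open("/dev/tee0", O_RDWR);
+
+        if (fd < 0 || ioctl(fd, TEE_IOC_VERSION, &vers) < 0)
+            return 1;
+        /* impl_id tells which driver backs this device, e.g. OP-TEE */
+        printf("TEE implementation id: %u\n", vers.impl_id);
+        return 0;
+    }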
-References:
+References
+==========
+
[1] https://github.com/OP-TEE/optee_os
+
[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+
[3] drivers/tee/optee/optee_smc.h
+
[4] drivers/tee/optee/optee_msg.h
+
[5] http://www.globalplatform.org/specificationsdevice.asp look for
"TEE Client API Specification v1.0" and click download.
diff --git a/Documentation/this_cpu_ops.txt b/Documentation/this_cpu_ops.txt
index 2cbf71975381..5cb8b883ae83 100644
--- a/Documentation/this_cpu_ops.txt
+++ b/Documentation/this_cpu_ops.txt
@@ -1,5 +1,9 @@
+===================
this_cpu operations
--------------------
+===================
+
+:Author: Christoph Lameter, August 4th, 2014
+:Author: Pranith Kumar, Aug 2nd, 2014
this_cpu operations are a way of optimizing access to per cpu
variables associated with the *currently* executing processor. This is
@@ -39,7 +43,7 @@ operations.
The following this_cpu() operations with implied preemption protection
are defined. These operations can be used without worrying about
-preemption and interrupts.
+preemption and interrupts::
this_cpu_read(pcp)
this_cpu_write(pcp, val)
@@ -67,14 +71,14 @@ to relocate a per cpu relative address to the proper per cpu area for
the processor. So the relocation to the per cpu base is encoded in the
instruction via a segment register prefix.
-For example:
+For example::
DEFINE_PER_CPU(int, x);
int z;
z = this_cpu_read(x);
-results in a single instruction
+results in a single instruction::
mov ax, gs:[x]
@@ -84,16 +88,16 @@ this_cpu_ops such sequence also required preempt disable/enable to
prevent the kernel from moving the thread to a different processor
while the calculation is performed.
-Consider the following this_cpu operation:
+Consider the following this_cpu operation::
this_cpu_inc(x)
-The above results in the following single instruction (no lock prefix!)
+The above results in the following single instruction (no lock prefix!)::
inc gs:[x]
instead of the following operations required if there is no segment
-register:
+register::
int *y;
int cpu;
@@ -121,8 +125,10 @@ has to be paid for this optimization is the need to add up the per cpu
counters when the value of a counter is needed.
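+
+A minimal sketch of this pattern (the variable and helper names here are
+illustrative)::
+
+	DEFINE_PER_CPU(unsigned long, hits);
+
+	static void record_hit(void)
+	{
+		/* Fast path: increment only this CPU's counter. */
+		this_cpu_inc(hits);
+	}
+
+	static unsigned long total_hits(void)
+	{
+		unsigned long sum = 0;
+		int cpu;
+
+		/* Slow path: add up every CPU's counter on demand. */
+		for_each_possible_cpu(cpu)
+			sum += per_cpu(hits, cpu);
+		return sum;
+	}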
-Special operations:
--------------------
+Special operations
+------------------
+
+::
y = this_cpu_ptr(&x)
@@ -153,11 +159,15 @@ Therefore the use of x or &x outside of the context of per cpu
operations is invalid and will generally be treated like a NULL
pointer dereference.
+::
+
DEFINE_PER_CPU(int, x);
In the context of per cpu operations the above implies that x is a per
cpu variable. Most this_cpu operations take a cpu variable.
+::
+
int __percpu *p = &x;
&x and hence p is the *offset* of a per cpu variable. this_cpu_ptr()
@@ -168,7 +178,7 @@ strange.
Operations on a field of a per cpu structure
--------------------------------------------
-Let's say we have a percpu structure
+Let's say we have a percpu structure::
struct s {
int n,m;
@@ -177,14 +187,14 @@ Let's say we have a percpu structure
DEFINE_PER_CPU(struct s, p);
-Operations on these fields are straightforward
+Operations on these fields are straightforward::
this_cpu_inc(p.m)
z = this_cpu_cmpxchg(p.m, 0, 1);
-If we have an offset to struct s:
+If we have an offset to struct s::
struct s __percpu *ps = &p;
@@ -194,7 +204,7 @@ If we have an offset to struct s:
The calculation of the pointer may require the use of this_cpu_ptr()
-if we do not make use of this_cpu ops later to manipulate fields:
+if we do not make use of this_cpu ops later to manipulate fields::
struct s *pp;
@@ -206,7 +216,7 @@ if we do not make use of this_cpu ops later to manipulate fields:
Variants of this_cpu ops
--------------------------
+------------------------
this_cpu ops are interrupt safe. Some architectures do not support
these per cpu local operations. In that case the operation must be
@@ -222,7 +232,7 @@ preemption. If a per cpu variable is not used in an interrupt context
and the scheduler cannot preempt, then they are safe. If any interrupts
still occur while an operation is in progress and if the interrupt too
modifies the variable, then RMW actions can not be guaranteed to be
-safe.
+safe::
__this_cpu_read(pcp)
__this_cpu_write(pcp, val)
@@ -279,7 +289,7 @@ unless absolutely necessary. Please consider using an IPI to wake up
the remote CPU and perform the update to its per cpu area.
To access per-cpu data structure remotely, typically the per_cpu_ptr()
-function is used:
+function is used::
DEFINE_PER_CPU(struct data, datap);
@@ -289,7 +299,7 @@ function is used:
This makes it explicit that we are getting ready to access a percpu
area remotely.
-You can also do the following to convert the datap offset to an address
+You can also do the following to convert the datap offset to an address::
struct data *p = this_cpu_ptr(&datap);
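+
+As a sketch of the IPI approach recommended above (names illustrative)::
+
+	static DEFINE_PER_CPU(unsigned long, events);
+
+	static void bump_events(void *info)
+	{
+		/* Runs on the target CPU, so this_cpu ops are safe here. */
+		this_cpu_inc(events);
+	}
+
+	/* Have @cpu update its own per cpu area instead of writing to
+	 * it remotely; the final argument makes the call wait for
+	 * completion. */
+	smp_call_function_single(cpu, bump_events, NULL, 1);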
@@ -305,7 +315,7 @@ the following scenario that occurs because two per cpu variables
share a cache-line but the relaxed synchronization is applied to
only one process updating the cache-line.
-Consider the following example
+Consider the following example::
struct test {
@@ -327,6 +337,3 @@ mind that a remote write will evict the cache line from the processor
that most likely will access it. If the processor wakes up and finds a
missing local cache line of a per cpu area, its performance and hence
the wake up times will be affected.
-
-Christoph Lameter, August 4th, 2014
-Pranith Kumar, Aug 2nd, 2014
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index fff8ff6d4893..d4601df6e72e 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -5,10 +5,11 @@ Copyright 2008 Red Hat Inc.
Author: Steven Rostedt <srostedt@redhat.com>
License: The GNU Free Documentation License, Version 1.2
(dual licensed under the GPL v2)
-Reviewers: Elias Oltmanns, Randy Dunlap, Andrew Morton,
- John Kacur, and David Teigland.
+Original Reviewers: Elias Oltmanns, Randy Dunlap, Andrew Morton,
+ John Kacur, and David Teigland.
Written for: 2.6.28-rc2
Updated for: 3.10
+Updated for: 4.13 - Copyright 2017 VMware Inc. Steven Rostedt
Introduction
------------
@@ -26,9 +27,11 @@ a task is woken to the task is actually scheduled in.
One of the most common uses of ftrace is event tracing.
Throughout the kernel there are hundreds of static event points that
-can be enabled via the debugfs file system to see what is
+can be enabled via the tracefs file system to see what is
going on in certain parts of the kernel.
+See events.txt for more information.
+
Implementation Details
----------------------
@@ -39,34 +42,47 @@ See ftrace-design.txt for details for arch porters and such.
The File System
---------------
-Ftrace uses the debugfs file system to hold the control files as
+Ftrace uses the tracefs file system to hold the control files as
well as the files to display output.
-When debugfs is configured into the kernel (which selecting any ftrace
-option will do) the directory /sys/kernel/debug will be created. To mount
+When tracefs is configured into the kernel (which selecting any ftrace
+option will do) the directory /sys/kernel/tracing will be created. To mount
this directory, you can add to your /etc/fstab file:
- debugfs /sys/kernel/debug debugfs defaults 0 0
+ tracefs /sys/kernel/tracing tracefs defaults 0 0
Or you can mount it at run time with:
- mount -t debugfs nodev /sys/kernel/debug
+ mount -t tracefs nodev /sys/kernel/tracing
For quicker access to that directory you may want to make a soft link to
it:
- ln -s /sys/kernel/debug /debug
+ ln -s /sys/kernel/tracing /tracing
+
+ *** NOTICE ***
+
+Before 4.1, all ftrace tracing control files were within the debugfs
+file system, which is typically located at /sys/kernel/debug/tracing.
+For backward compatibility, when mounting the debugfs file system,
+the tracefs file system will be automatically mounted at:
+
+ /sys/kernel/debug/tracing
-Any selected ftrace option will also create a directory called tracing
-within the debugfs. The rest of the document will assume that you are in
-the ftrace directory (cd /sys/kernel/debug/tracing) and will only concentrate
-on the files within that directory and not distract from the content with
-the extended "/sys/kernel/debug/tracing" path name.
+All files located in the tracefs file system will be located in that
+debugfs file system directory as well.
+
+ *** NOTICE ***
+
+Any selected ftrace option will also create the tracefs file system.
+The rest of the document will assume that you are in the ftrace directory
+(cd /sys/kernel/tracing) and will only concentrate on the files within that
+directory and not distract from the content with the extended
+"/sys/kernel/tracing" path name.
That's it! (assuming that you have ftrace configured into your kernel)
-After mounting debugfs, you can see a directory called
-"tracing". This directory contains the control and output files
+After mounting tracefs you will have access to the control and output files
of ftrace. Here is a list of some of the key files:
@@ -92,10 +108,20 @@ of ftrace. Here is a list of some of the key files:
writing to the ring buffer, the tracing overhead may
still be occurring.
+ The kernel function tracing_off() can be used within the
+ kernel to disable writing to the ring buffer, which will
+ set this file to "0". User space can re-enable tracing by
+ echoing "1" into the file.
+
+ Note, the function and event trigger "traceoff" will also
+        set this file to zero and stop tracing. Tracing can then
+        be re-enabled from user space using this file.
+
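+        As an illustration, a kernel-side sketch that freezes the trace
+        as soon as a suspect condition is seen (the condition here is
+        purely hypothetical)::
+
+            if (WARN_ON(err == -EIO))
+                    tracing_off(); /* keep the ring buffer for inspection */
+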
trace:
This file holds the output of the trace in a human
- readable format (described below).
+ readable format (described below). Note, tracing is temporarily
+ disabled while this file is being read (opened).
trace_pipe:
@@ -109,7 +135,8 @@ of ftrace. Here is a list of some of the key files:
will not be read again with a sequential read. The
"trace" file is static, and if the tracer is not
adding more data, it will display the same
- information every time it is read.
+ information every time it is read. This file will not
+ disable tracing while being read.
trace_options:
@@ -128,12 +155,14 @@ of ftrace. Here is a list of some of the key files:
tracing_max_latency:
Some of the tracers record the max latency.
- For example, the time interrupts are disabled.
- This time is saved in this file. The max trace
- will also be stored, and displayed by "trace".
- A new max trace will only be recorded if the
- latency is greater than the value in this
- file. (in microseconds)
+ For example, the maximum time that interrupts are disabled.
+ The maximum time is saved in this file. The max trace will also be
+ stored, and displayed by "trace". A new max trace will only be
+ recorded if the latency is greater than the value in this file
+ (in microseconds).
+
+        If a time is echoed into this file, no latency will be recorded
+        unless it is greater than that time.
tracing_thresh:
@@ -152,32 +181,34 @@ of ftrace. Here is a list of some of the key files:
that the kernel uses for allocation, usually 4 KB in size).
If the last page allocated has room for more bytes
than requested, the rest of the page will be used,
- making the actual allocation bigger than requested.
+ making the actual allocation bigger than requested or shown.
( Note, the size may not be a multiple of the page size
due to buffer management meta-data. )
+ Buffer sizes for individual CPUs may vary
+ (see "per_cpu/cpu0/buffer_size_kb" below), and if they do
+ this file will show "X".
+
buffer_total_size_kb:
This displays the total combined size of all the trace buffers.
free_buffer:
- If a process is performing the tracing, and the ring buffer
- should be shrunk "freed" when the process is finished, even
- if it were to be killed by a signal, this file can be used
- for that purpose. On close of this file, the ring buffer will
- be resized to its minimum size. Having a process that is tracing
- also open this file, when the process exits its file descriptor
- for this file will be closed, and in doing so, the ring buffer
- will be "freed".
+ If a process is performing tracing, and the ring buffer should be
+ shrunk "freed" when the process is finished, even if it were to be
+ killed by a signal, this file can be used for that purpose. On close
+ of this file, the ring buffer will be resized to its minimum size.
+        If the process that is tracing also has this file open, then when
+        that process exits, its file descriptor for this file will be closed,
+        and in doing so, the ring buffer will be "freed".
It may also stop tracing if disable_on_free option is set.
tracing_cpumask:
- This is a mask that lets the user only trace
- on specified CPUs. The format is a hex string
- representing the CPUs.
+ This is a mask that lets the user only trace on specified CPUs.
+ The format is a hex string representing the CPUs.
set_ftrace_filter:
@@ -190,6 +221,9 @@ of ftrace. Here is a list of some of the key files:
to be traced. Echoing names of functions into this file
will limit the trace to only those functions.
+ The functions listed in "available_filter_functions" are what
+ can be written into this file.
+
This interface also allows for commands to be used. See the
"Filter commands" section for more details.
@@ -202,7 +236,14 @@ of ftrace. Here is a list of some of the key files:
set_ftrace_pid:
- Have the function tracer only trace a single thread.
+        Have the function tracer only trace the threads whose PIDs are
+        listed in this file.
+
+ If the "function-fork" option is set, then when a task whose
+ PID is listed in this file forks, the child's PID will
+ automatically be added to this file, and the child will be
+ traced by the function tracer as well. This option will also
+ cause PIDs of tasks that exit to be removed from the file.
set_event_pid:
@@ -217,17 +258,28 @@ of ftrace. Here is a list of some of the key files:
set_graph_function:
- Set a "trigger" function where tracing should start
- with the function graph tracer (See the section
- "dynamic ftrace" for more details).
+ Functions listed in this file will cause the function graph
+ tracer to only trace these functions and the functions that
+ they call. (See the section "dynamic ftrace" for more details).
+
+ set_graph_notrace:
+
+ Similar to set_graph_function, but will disable function graph
+ tracing when the function is hit until it exits the function.
+        This makes it possible to avoid tracing the functions that are called
+ by a specific function.
available_filter_functions:
- This lists the functions that ftrace
- has processed and can trace. These are the function
- names that you can pass to "set_ftrace_filter" or
- "set_ftrace_notrace". (See the section "dynamic ftrace"
- below for more details.)
+ This lists the functions that ftrace has processed and can trace.
+ These are the function names that you can pass to
+ "set_ftrace_filter" or "set_ftrace_notrace".
+ (See the section "dynamic ftrace" below for more details.)
+
+ dyn_ftrace_total_info:
+
+        This file is for debugging purposes. It shows the number of
+        functions that have been converted to nops and are available to
+        be traced.
enabled_functions:
@@ -250,12 +302,21 @@ of ftrace. Here is a list of some of the key files:
an 'I' will be displayed on the same line as the function that
can be overridden.
+ If the architecture supports it, it will also show what callback
+ is being directly called by the function. If the count is greater
+ than 1 it most likely will be ftrace_ops_list_func().
+
+ If the callback of the function jumps to a trampoline that is
+        specific to the callback and not the standard trampoline,
+ its address will be printed as well as the function that the
+ trampoline calls.
+
function_profile_enabled:
When set it will enable all functions with either the function
- tracer, or if enabled, the function graph tracer. It will
+ tracer, or if configured, the function graph tracer. It will
keep a histogram of the number of functions that were called
- and if run with the function graph tracer, it will also keep
+ and if the function graph tracer was configured, it will also keep
track of the time spent in those functions. The histogram
content can be displayed in the files:
@@ -283,12 +344,11 @@ of ftrace. Here is a list of some of the key files:
printk_formats:
This is for tools that read the raw format files. If an event in
- the ring buffer references a string (currently only trace_printk()
- does this), only a pointer to the string is recorded into the buffer
- and not the string itself. This prevents tools from knowing what
- that string was. This file displays the string and address for
- the string allowing tools to map the pointers to what the
- strings were.
+ the ring buffer references a string, only a pointer to the string
+ is recorded into the buffer and not the string itself. This prevents
+ tools from knowing what that string was. This file displays the string
+ and address for the string allowing tools to map the pointers to what
+ the strings were.
saved_cmdlines:
@@ -298,6 +358,22 @@ of ftrace. Here is a list of some of the key files:
comms for events. If a pid for a comm is not listed, then
"<...>" is displayed in the output.
+ If the option "record-cmd" is set to "0", then comms of tasks
+ will not be saved during recording. By default, it is enabled.
+
+ saved_cmdlines_size:
+
+ By default, 128 comms are saved (see "saved_cmdlines" above). To
+        increase or decrease the number of comms that are cached, echo
+        the number of comms to cache into this file.
+
+ saved_tgids:
+
+ If the option "record-tgid" is set, on each scheduling context switch
+ the Task Group ID of a task is saved in a table mapping the PID of
+ the thread to its TGID. By default, the "record-tgid" option is
+ disabled.
+
snapshot:
This displays the "snapshot" buffer and also lets the user
@@ -336,6 +412,9 @@ of ftrace. Here is a list of some of the key files:
# cat trace_clock
[local] global counter x86-tsc
+ The clock with the square brackets around it is the one
+ in effect.
+
local: Default clock, but may not be in sync across CPUs
global: This clock is in sync with all CPUs but may
@@ -448,6 +527,23 @@ of ftrace. Here is a list of some of the key files:
See events.txt for more information.
+ set_event:
+
+        Echoing the name of an event into this file will enable that event.
+
+ See events.txt for more information.
+
+ available_events:
+
+ A list of events that can be enabled in tracing.
+
+ See events.txt for more information.
+
+ hwlat_detector:
+
+ Directory for the Hardware Latency Detector.
+ See "Hardware Latency Detector" section below.
+
per_cpu:
This is a directory that contains the trace per_cpu information.
@@ -539,13 +635,25 @@ Here is the list of current tracers that may be configured.
to draw a graph of function calls similar to C code
source.
+ "blk"
+
+ The block tracer. The tracer used by the blktrace user
+ application.
+
+ "hwlat"
+
+ The Hardware Latency tracer is used to detect if the hardware
+ produces any latency. See "Hardware Latency Detector" section
+ below.
+
"irqsoff"
Traces the areas that disable interrupts and saves
the trace with the longest max latency.
See tracing_max_latency. When a new max is recorded,
it replaces the old trace. It is best to view this
- trace with the latency-format option enabled.
+ trace with the latency-format option enabled, which
+ happens automatically when the tracer is selected.
"preemptoff"
@@ -571,6 +679,26 @@ Here is the list of current tracers that may be configured.
RT tasks (as the current "wakeup" does). This is useful
for those interested in wake up timings of RT tasks.
+ "wakeup_dl"
+
+ Traces and records the max latency that it takes for
+ a SCHED_DEADLINE task to be woken (as the "wakeup" and
+ "wakeup_rt" does).
+        "wakeup_rt" tracers do).
+ "mmiotrace"
+
+        A special tracer that is used to trace binary modules.
+        It will trace all the calls that a module makes to the
+        hardware, everything it writes to and reads from the I/O
+        space.
+
+ "branch"
+
+ This tracer can be configured when tracing likely/unlikely
+        calls within the kernel. It will trace when a likely or
+        unlikely branch is hit and whether its prediction was
+        correct.
+
"nop"
This is the "trace nothing" tracer. To remove all
@@ -582,7 +710,7 @@ Examples of using the tracer
----------------------------
Here are typical examples of using the tracers when controlling
-them only with the debugfs interface (without using any
+them only with the tracefs interface (without using any
user-land utilities).
Output format:
@@ -674,7 +802,7 @@ why a latency happened. Here is a typical trace.
This shows that the current tracer is "irqsoff" tracing the time
for which interrupts were disabled. It gives the trace version (which
never changes) and the version of the kernel on which this was executed
-(3.10). Then it displays the max latency in microseconds (259 us). The number
+(3.8). Then it displays the max latency in microseconds (259 us). The number
of trace entries displayed and the total number (both are four: #4/4).
VP, KP, SP, and HP are always zero and are reserved for later use.
#P is the number of online CPUs (#P:4).
@@ -709,6 +837,8 @@ explains which is which.
'.' otherwise.
hardirq/softirq:
+ 'Z' - NMI occurred inside a hardirq
+ 'z' - NMI is running
'H' - hard irq occurred inside a softirq.
'h' - hard irq is running
's' - soft irq is running
@@ -757,24 +887,24 @@ nohex
nobin
noblock
trace_printk
-nobranch
annotate
nouserstacktrace
nosym-userobj
noprintk-msg-only
context-info
nolatency-format
-sleep-time
-graph-time
record-cmd
+norecord-tgid
overwrite
nodisable_on_free
irq-info
markers
noevent-fork
function-trace
+nofunction-fork
nodisplay-graph
nostacktrace
+nobranch
To disable one of the options, echo in the option prepended with
"no".
@@ -830,8 +960,6 @@ Here are the available options:
trace_printk - Can disable trace_printk() from writing into the buffer.
- branch - Enable branch tracing with the tracer.
-
annotate - It is sometimes confusing when the CPU buffers are full
and one CPU buffer had a lot of events recently, thus
a shorter time frame, where another CPU may have only had
@@ -850,7 +978,8 @@ Here are the available options:
<idle>-0 [001] .Ns3 21169.031485: sub_preempt_count <-_raw_spin_unlock
userstacktrace - This option changes the trace. It records a
- stacktrace of the current userspace thread.
+ stacktrace of the current user space thread after
+ each trace event.
sym-userobj - when user stacktrace are enabled, look up which
object the address belongs to, and print a
@@ -873,29 +1002,21 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
context-info - Show only the event data. Hides the comm, PID,
timestamp, CPU, and other useful data.
- latency-format - This option changes the trace. When
- it is enabled, the trace displays
- additional information about the
- latencies, as described in "Latency
- trace format".
-
- sleep-time - When running function graph tracer, to include
- the time a task schedules out in its function.
- When enabled, it will account time the task has been
- scheduled out as part of the function call.
-
- graph-time - When running function profiler with function graph tracer,
- to include the time to call nested functions. When this is
- not set, the time reported for the function will only
- include the time the function itself executed for, not the
- time for functions that it called.
+ latency-format - This option changes the trace output. When it is enabled,
+ the trace displays additional information about the
+ latency, as described in "Latency trace format".
record-cmd - When any event or tracer is enabled, a hook is enabled
- in the sched_switch trace point to fill comm cache
+ in the sched_switch trace point to fill comm cache
with mapped pids and comms. But this may cause some
overhead, and if you only care about pids, and not the
name of the task, disabling this option can lower the
- impact of tracing.
+ impact of tracing. See "saved_cmdlines".
+
+ record-tgid - When any event or tracer is enabled, a hook is enabled
+                 in the sched_switch trace point to fill the cache
+                 mapping each task's PID to its Thread Group ID (TGID). See
+ "saved_tgids".
overwrite - This controls what happens when the trace buffer is
full. If "1" (default), the oldest events are
@@ -935,19 +1056,98 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
functions. This keeps the overhead of the tracer down
when performing latency tests.
+ function-fork - When set, tasks with PIDs listed in set_ftrace_pid will
+ have the PIDs of their children added to set_ftrace_pid
+ when those tasks fork. Also, when tasks with PIDs in
+ set_ftrace_pid exit, their PIDs will be removed from the
+ file.
+
display-graph - When set, the latency tracers (irqsoff, wakeup, etc) will
use function graph tracing instead of function tracing.
- stacktrace - This is one of the options that changes the trace
- itself. When a trace is recorded, so is the stack
- of functions. This allows for back traces of
- trace sites.
+ stacktrace - When set, a stack trace is recorded after any trace event
+ is recorded.
+
+  branch - Enable branch tracing with the tracer. This enables the branch
+ tracer along with the currently set tracer. Enabling this
+ with the "nop" tracer is the same as just enabling the
+ "branch" tracer.
Note: Some tracers have their own options. They only appear in this
file when the tracer is active. They always appear in the
options directory.
+Here are the per tracer options:
+
+Options for function tracer:
+
+ func_stack_trace - When set, a stack trace is recorded after every
+ function that is recorded. NOTE! Limit the functions
+ that are recorded before enabling this, with
+ "set_ftrace_filter" otherwise the system performance
+ will be critically degraded. Remember to disable
+ this option before clearing the function filter.
+
+Options for function_graph tracer:
+
+ Since the function_graph tracer has a slightly different output
+ it has its own options to control what is displayed.
+
+ funcgraph-overrun - When set, the "overrun" of the graph stack is
+ displayed after each function traced. The
+                      overrun is when the stack depth of the calls
+ is greater than what is reserved for each task.
+ Each task has a fixed array of functions to
+ trace in the call graph. If the depth of the
+ calls exceeds that, the function is not traced.
+ The overrun is the number of functions missed
+ due to exceeding this array.
+
+ funcgraph-cpu - When set, the CPU number of the CPU where the trace
+ occurred is displayed.
+
+ funcgraph-overhead - When set, if the function takes longer than
+                       a certain amount, then a delay marker is
+ displayed. See "delay" above, under the
+ header description.
+
+ funcgraph-proc - Unlike other tracers, the process' command line
+ is not displayed by default, but instead only
+ when a task is traced in and out during a context
+                   switch. Enabling this option displays the command
+                   of each process on every line.
+
+ funcgraph-duration - At the end of each function (the return)
+                       the duration of time spent in the
+ function is displayed in microseconds.
+
+ funcgraph-abstime - When set, the timestamp is displayed at each
+ line.
+
+ funcgraph-irqs - When disabled, functions that happen inside an
+ interrupt will not be traced.
+
+ funcgraph-tail - When set, the return event will include the function
+ that it represents. By default this is off, and
+ only a closing curly bracket "}" is displayed for
+ the return of a function.
+
+  sleep-time - When running the function graph tracer, include
+               the time a task schedules out in its function.
+ When enabled, it will account time the task has been
+ scheduled out as part of the function call.
+
+ graph-time - When running function profiler with function graph tracer,
+ to include the time to call nested functions. When this is
+ not set, the time reported for the function will only
+ include the time the function itself executed for, not the
+ time for functions that it called.
+
+Options for blk tracer:
+
+ blk_classic - Shows a more minimalistic output.
+
irqsoff
-------
@@ -1711,6 +1911,85 @@ events.
<idle>-0 2d..3 6us : 0:120:R ==> [002] 5882: 94:R sleep
+Hardware Latency Detector
+-------------------------
+
+The hardware latency detector is executed by enabling the "hwlat" tracer.
+
+NOTE, this tracer will affect the performance of the system as it will
+periodically make a CPU constantly busy with interrupts disabled.
+
+ # echo hwlat > current_tracer
+ # sleep 100
+ # cat trace
+# tracer: hwlat
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ <...>-3638 [001] d... 19452.055471: #1 inner/outer(us): 12/14 ts:1499801089.066141940
+ <...>-3638 [003] d... 19454.071354: #2 inner/outer(us): 11/9 ts:1499801091.082164365
+ <...>-3638 [002] dn.. 19461.126852: #3 inner/outer(us): 12/9 ts:1499801098.138150062
+ <...>-3638 [001] d... 19488.340960: #4 inner/outer(us): 8/12 ts:1499801125.354139633
+ <...>-3638 [003] d... 19494.388553: #5 inner/outer(us): 8/12 ts:1499801131.402150961
+ <...>-3638 [003] d... 19501.283419: #6 inner/outer(us): 0/12 ts:1499801138.297435289 nmi-total:4 nmi-count:1
+
+
+The header of the above output is similar to that of the other tracers. All
+events will have interrupts disabled 'd'. Under the FUNCTION title there is:
+
+ #1 - This is the count of events recorded that were greater than the
+ tracing_threshold (See below).
+
+ inner/outer(us): 12/14
+
+ This shows two numbers as "inner latency" and "outer latency". The test
+ runs in a loop checking a timestamp twice. The latency detected within
+ the two timestamps is the "inner latency" and the latency detected
+    between the previous timestamp and the next timestamp in the loop is
+ the "outer latency".
+
+ ts:1499801089.066141940
+
+ The absolute timestamp that the event happened.
+
+ nmi-total:4 nmi-count:1
+
+ On architectures that support it, if an NMI comes in during the
+ test, the time spent in NMI is reported in "nmi-total" (in
+ microseconds).
+
+ All architectures that have NMIs will show the "nmi-count" if an
+ NMI comes in during the test.
+
+hwlat files:
+
+ tracing_threshold - This gets automatically set to "10" to represent 10
+ microseconds. This is the threshold of latency that
+ needs to be detected before the trace will be recorded.
+
+ Note, when hwlat tracer is finished (another tracer is
+ written into "current_tracer"), the original value for
+ tracing_threshold is placed back into this file.
+
+ hwlat_detector/width - The length of time the test runs with interrupts
+ disabled.
+
+  hwlat_detector/window - The length of time of the window in which the test
+                          runs. That is, the test will run for "width"
+                          microseconds per "window" microseconds.
+
+  tracing_cpumask - When the test is started, a kernel thread is created that
+ runs the test. This thread will alternate between CPUs
+ listed in the tracing_cpumask between each period
+ (one "window"). To limit the test to specific CPUs
+ set the mask in this file to only the CPUs that the test
+ should run on.
+
function
--------
@@ -1821,15 +2100,15 @@ something like this simple program:
#define STR(x) _STR(x)
#define MAX_PATH 256
-const char *find_debugfs(void)
+const char *find_tracefs(void)
{
- static char debugfs[MAX_PATH+1];
- static int debugfs_found;
+ static char tracefs[MAX_PATH+1];
+ static int tracefs_found;
char type[100];
FILE *fp;
- if (debugfs_found)
- return debugfs;
+ if (tracefs_found)
+ return tracefs;
if ((fp = fopen("/proc/mounts","r")) == NULL) {
perror("/proc/mounts");
@@ -1839,27 +2118,27 @@ const char *find_debugfs(void)
while (fscanf(fp, "%*s %"
STR(MAX_PATH)
"s %99s %*s %*d %*d\n",
- debugfs, type) == 2) {
- if (strcmp(type, "debugfs") == 0)
+ tracefs, type) == 2) {
+ if (strcmp(type, "tracefs") == 0)
break;
}
fclose(fp);
- if (strcmp(type, "debugfs") != 0) {
- fprintf(stderr, "debugfs not mounted");
+ if (strcmp(type, "tracefs") != 0) {
+ fprintf(stderr, "tracefs not mounted");
return NULL;
}
- strcat(debugfs, "/tracing/");
- debugfs_found = 1;
+	strcat(tracefs, "/"); /* tracefs is mounted on the tracing dir itself */
+ tracefs_found = 1;
- return debugfs;
+ return tracefs;
}
const char *tracing_file(const char *file_name)
{
static char trace_file[MAX_PATH+1];
- snprintf(trace_file, MAX_PATH, "%s/%s", find_debugfs(), file_name);
+ snprintf(trace_file, MAX_PATH, "%s/%s", find_tracefs(), file_name);
return trace_file;
}
@@ -1898,12 +2177,12 @@ Or this simple script!
------
#!/bin/bash
-debugfs=`sed -ne 's/^debugfs \(.*\) debugfs.*/\1/p' /proc/mounts`
-echo nop > $debugfs/tracing/current_tracer
-echo 0 > $debugfs/tracing/tracing_on
-echo $$ > $debugfs/tracing/set_ftrace_pid
-echo function > $debugfs/tracing/current_tracer
-echo 1 > $debugfs/tracing/tracing_on
+tracefs=`sed -ne 's/^tracefs \(.*\) tracefs.*/\1/p' /proc/mounts`
+echo nop > $tracefs/current_tracer
+echo 0 > $tracefs/tracing_on
+echo $$ > $tracefs/set_ftrace_pid
+echo function > $tracefs/current_tracer
+echo 1 > $tracefs/tracing_on
exec "$@"
------
@@ -2145,13 +2424,18 @@ include the -pg switch in the compiling of the kernel.)
At compile time every C file object is run through the
recordmcount program (located in the scripts directory). This
program will parse the ELF headers in the C object to find all
-the locations in the .text section that call mcount. (Note, only
-white listed .text sections are processed, since processing other
-sections like .init.text may cause races due to those sections
-being freed unexpectedly).
-
-A new section called "__mcount_loc" is created that holds
-references to all the mcount call sites in the .text section.
+the locations in the .text section that call mcount. Starting
+with gcc version 4.6, the -mfentry option has been added for x86, which
+calls "__fentry__" instead of "mcount". "__fentry__" is called before
+the creation of the stack frame.
+
+Note, not all functions are traced. They may be prevented by either
+a notrace annotation, or blocked in another way; also, inline functions
+are never traced. Check the "available_filter_functions" file to see what
+functions can be traced.
+
+A section called "__mcount_loc" is created that holds
+references to all the mcount/fentry call sites in the .text section.
The recordmcount program re-links this section back into the
original object. The final linking stage of the kernel will add all these
references into a single table.
@@ -2679,7 +2963,7 @@ in time without stopping tracing. Ftrace swaps the current
buffer with a spare buffer, and tracing continues in the new
current (=previous spare) buffer.
-The following debugfs files in "tracing" are related to this
+The following tracefs files in "tracing" are related to this
feature:
snapshot:
@@ -2752,7 +3036,7 @@ cat: snapshot: Device or resource busy
Instances
---------
-In the debugfs tracing directory is a directory called "instances".
+In the tracefs tracing directory is a directory called "instances".
This directory can have new directories created inside of it using
mkdir, and removing directories with rmdir. The directory created
with mkdir in this directory will already contain files and other
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index c6f4ead76ce7..38310dcd6620 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -523,11 +523,11 @@ There are some minimal guarantees that may be expected of a CPU
That is, ACQUIRE acts as a minimal "acquire" operation and RELEASE acts as a
minimal "release" operation.
-A subset of the atomic operations described in atomic_ops.txt have ACQUIRE
-and RELEASE variants, in addition to the fully-ordered and relaxed (no
-barrier semantics) definitions.  For compound atomics performing both a load
-and a store, the ACQUIRE semantics apply only to the load portion and the
-RELEASE semantics apply only to the store portion of the operation.
+A subset of the atomic operations described in core-api/atomic_ops.rst have
+ACQUIRE and RELEASE variants, in addition to the fully-ordered and relaxed
+(no barrier semantics) definitions.  For compound atomics performing both a
+load and a store, the ACQUIRE semantics apply only to the load portion and
+the RELEASE semantics apply only to the store portion of the operation.
Memory barriers are only required where there's a possibility of interaction
between two CPUs or between a CPU and a device. If it can be guaranteed that
@@ -1848,7 +1848,7 @@ Mandatory barriers impose SMP effects on both SMP and UP systems
	This code guarantees that the updated death mark on the object is
	perceived *before* the reference counter decrement takes place.
-	For more information, see the Documentation/atomic_ops.txt document.
+	For more information, see the Documentation/core-api/atomic_ops.rst document.
	See the "Atomic operations" subsection for information on where to use
	these.
@@ -2550,7 +2550,7 @@ on CPUs where the atomic instructions themselves imply memory barriers
in which case these special memory barrier primitives are no-ops and
effectively do nothing.
-For more information, see Documentation/atomic_ops.txt.
+For more information, see Documentation/core-api/atomic_ops.rst.
DEVICE ACCESS
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index 3f76c0c37920..51b4ff031586 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -1,6 +1,15 @@
+=========================
UNALIGNED MEMORY ACCESSES
=========================
+:Author: Daniel Drake <dsd@gentoo.org>
+:Author: Johannes Berg <johannes@sipsolutions.net>
+
+:With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt,
+ Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock, Uli Kunitz,
+ Vadim Lobanov
+
+
Linux runs on a wide variety of architectures which have varying behaviour
when it comes to memory access. This document presents some details about
unaligned accesses, why you need to write code that doesn't cause them,
@@ -73,7 +82,7 @@ memory addresses of certain variables, etc.
Fortunately things are not too complex, as in most cases, the compiler
ensures that things will work for you. For example, take the following
-structure:
+structure::
struct foo {
u16 field1;
@@ -106,7 +115,7 @@ On a related topic, with the above considerations in mind you may observe
that you could reorder the fields in the structure in order to place fields
where padding would otherwise be inserted, and hence reduce the overall
resident memory size of structure instances. The optimal layout of the
-above example is:
+above example is::
struct foo {
u32 field2;
@@ -139,21 +148,21 @@ Code that causes unaligned access
With the above in mind, let's move onto a real life example of a function
that can cause an unaligned memory access. The following function taken
from include/linux/etherdevice.h is an optimized routine to compare two
-ethernet MAC addresses for equality.
+ethernet MAC addresses for equality::
-bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
-{
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
+ {
+ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));
return fold == 0;
-#else
+ #else
const u16 *a = (const u16 *)addr1;
const u16 *b = (const u16 *)addr2;
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
-#endif
-}
+ #endif
+ }
In the above function, when the hardware has efficient unaligned access
capability, there is no issue with this code. But when the hardware isn't
@@ -171,7 +180,8 @@ as it is a decent optimization for the cases when you can ensure alignment,
which is true almost all of the time in ethernet networking context.
-Here is another example of some code that could cause unaligned accesses:
+Here is another example of some code that could cause unaligned accesses::
+
void myfunc(u8 *data, u32 value)
{
[...]
@@ -184,6 +194,7 @@ to an address that is not evenly divisible by 4.
In summary, the 2 main scenarios where you may run into unaligned access
problems involve:
+
1. Casting variables to types of different lengths
2. Pointer arithmetic followed by access to at least 2 bytes of data
@@ -195,7 +206,7 @@ The easiest way to avoid unaligned access is to use the get_unaligned() and
put_unaligned() macros provided by the <asm/unaligned.h> header file.
Going back to an earlier example of code that potentially causes unaligned
-access:
+access::
void myfunc(u8 *data, u32 value)
{
@@ -204,7 +215,7 @@ access:
[...]
}
-To avoid the unaligned memory access, you would rewrite it as follows:
+To avoid the unaligned memory access, you would rewrite it as follows::
void myfunc(u8 *data, u32 value)
{
@@ -215,7 +226,7 @@ To avoid the unaligned memory access, you would rewrite it as follows:
}
The get_unaligned() macro works similarly. Assuming 'data' is a pointer to
-memory and you wish to avoid unaligned access, its usage is as follows:
+memory and you wish to avoid unaligned access, its usage is as follows::
u32 value = get_unaligned((u32 *) data);
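+
+For example, a paired store and load through these helpers (a minimal
+sketch; the buffer layout is hypothetical)::
+
+	void store_len(u8 *hdr, u32 len)
+	{
+		/* hdr + 1 is not naturally aligned for a u32 store */
+		put_unaligned(len, (u32 *)(hdr + 1));
+	}
+
+	u32 load_len(const u8 *hdr)
+	{
+		return get_unaligned((const u32 *)(hdr + 1));
+	}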
@@ -245,18 +256,10 @@ For some ethernet hardware that cannot DMA to unaligned addresses like
4*n+2 or non-ethernet hardware, this can be a problem, and it is then
required to copy the incoming frame into an aligned buffer. Because this is
unnecessary on architectures that can do unaligned accesses, the code can be
-made dependent on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS like so:
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- skb = original skb
-#else
- skb = copy skb
-#endif
-
---
-Authors: Daniel Drake <dsd@gentoo.org>,
- Johannes Berg <johannes@sipsolutions.net>
-With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt,
-Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock, Uli Kunitz,
-Vadim Lobanov
+made dependent on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS like so::
+ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ skb = original skb
+ #else
+ skb = copy skb
+ #endif
diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt
index e5e57b40f8af..1b3950346532 100644
--- a/Documentation/vfio-mediated-device.txt
+++ b/Documentation/vfio-mediated-device.txt
@@ -1,14 +1,17 @@
-/*
- * VFIO Mediated devices
- *
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- * Author: Neo Jia <cjia@nvidia.com>
- * Kirti Wankhede <kwankhede@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+.. include:: <isonum.txt>
+
+=====================
+VFIO Mediated devices
+=====================
+
+:Copyright: |copy| 2016, NVIDIA CORPORATION. All rights reserved.
+:Author: Neo Jia <cjia@nvidia.com>
+:Author: Kirti Wankhede <kwankhede@nvidia.com>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 as
+published by the Free Software Foundation.
+
Virtual Function I/O (VFIO) Mediated devices[1]
===============================================
@@ -42,7 +45,7 @@ removes it from a VFIO group.
The following high-level block diagram shows the main components and interfaces
in the VFIO mediated driver framework. The diagram shows NVIDIA, Intel, and IBM
-devices as examples, as these devices are the first devices to use this module.
+devices as examples, as these devices are the first devices to use this module::
+---------------+
| |
@@ -91,7 +94,7 @@ Registration Interface for a Mediated Bus Driver
------------------------------------------------
The registration interface for a mediated bus driver provides the following
-structure to represent a mediated device's driver:
+structure to represent a mediated device's driver::
/*
* struct mdev_driver [2] - Mediated device's driver
@@ -110,14 +113,14 @@ structure to represent a mediated device's driver:
A mediated bus driver for mdev should use this structure in the function calls
to register and unregister itself with the core driver:
-* Register:
+* Register::
- extern int mdev_register_driver(struct mdev_driver *drv,
+ extern int mdev_register_driver(struct mdev_driver *drv,
struct module *owner);
-* Unregister:
+* Unregister::
- extern void mdev_unregister_driver(struct mdev_driver *drv);
+ extern void mdev_unregister_driver(struct mdev_driver *drv);
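+
+As an illustrative sketch of such a registration (the driver name and the
+callback bodies here are hypothetical)::
+
+	static int my_mdev_probe(struct device *dev)
+	{
+		/* set up driver state for this mediated device */
+		return 0;
+	}
+
+	static void my_mdev_remove(struct device *dev)
+	{
+		/* undo whatever probe() set up */
+	}
+
+	static struct mdev_driver my_mdev_driver = {
+		.name   = "my_mdev",
+		.probe  = my_mdev_probe,
+		.remove = my_mdev_remove,
+	};
+
+	/* typically from the bus driver's module init/exit */
+	mdev_register_driver(&my_mdev_driver, THIS_MODULE);
+	...
+	mdev_unregister_driver(&my_mdev_driver);
+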
The mediated bus driver is responsible for adding mediated devices to the VFIO
group when devices are bound to the driver and removing mediated devices from
@@ -152,15 +155,15 @@ The callbacks in the mdev_parent_ops structure are as follows:
* mmap: mmap emulation callback
A driver should use the mdev_parent_ops structure in the function call to
-register itself with the mdev core driver:
+register itself with the mdev core driver::
-extern int mdev_register_device(struct device *dev,
- const struct mdev_parent_ops *ops);
+ extern int mdev_register_device(struct device *dev,
+ const struct mdev_parent_ops *ops);
However, the mdev_parent_ops structure is not required in the function call
-that a driver should use to unregister itself with the mdev core driver:
+that a driver should use to unregister itself with the mdev core driver::
-extern void mdev_unregister_device(struct device *dev);
+ extern void mdev_unregister_device(struct device *dev);
Mediated Device Management Interface Through sysfs
@@ -183,30 +186,32 @@ with the mdev core driver.
Directories and files under the sysfs for Each Physical Device
--------------------------------------------------------------
-|- [parent physical device]
-|--- Vendor-specific-attributes [optional]
-|--- [mdev_supported_types]
-| |--- [<type-id>]
-| | |--- create
-| | |--- name
-| | |--- available_instances
-| | |--- device_api
-| | |--- description
-| | |--- [devices]
-| |--- [<type-id>]
-| | |--- create
-| | |--- name
-| | |--- available_instances
-| | |--- device_api
-| | |--- description
-| | |--- [devices]
-| |--- [<type-id>]
-| |--- create
-| |--- name
-| |--- available_instances
-| |--- device_api
-| |--- description
-| |--- [devices]
+::
+
+ |- [parent physical device]
+ |--- Vendor-specific-attributes [optional]
+ |--- [mdev_supported_types]
+ | |--- [<type-id>]
+ | | |--- create
+ | | |--- name
+ | | |--- available_instances
+ | | |--- device_api
+ | | |--- description
+ | | |--- [devices]
+ | |--- [<type-id>]
+ | | |--- create
+ | | |--- name
+ | | |--- available_instances
+ | | |--- device_api
+ | | |--- description
+ | | |--- [devices]
+ | |--- [<type-id>]
+ | |--- create
+ | |--- name
+ | |--- available_instances
+ | |--- device_api
+ | |--- description
+ | |--- [devices]
* [mdev_supported_types]
@@ -219,12 +224,12 @@ Directories and files under the sysfs for Each Physical Device
The [<type-id>] name is created by adding the device driver string as a prefix
  to the string provided by the vendor driver. The format of this name is as
- follows:
+ follows::
sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name);
(or using mdev_parent_dev(mdev) to arrive at the parent device outside
- of the core mdev code)
+ of the core mdev code)
* device_api
@@ -239,7 +244,7 @@ Directories and files under the sysfs for Each Physical Device
* [device]
This directory contains links to the devices of type <type-id> that have been
-created.
+ created.
* name
@@ -253,21 +258,25 @@ created.
Directories and Files Under the sysfs for Each mdev Device
----------------------------------------------------------
-|- [parent phy device]
-|--- [$MDEV_UUID]
+::
+
+ |- [parent phy device]
+ |--- [$MDEV_UUID]
|--- remove
|--- mdev_type {link to its type}
|--- vendor-specific-attributes [optional]
* remove (write only)
+
Writing '1' to the 'remove' file destroys the mdev device. The vendor driver can
fail the remove() callback if that device is active and the vendor driver
doesn't support hot unplug.
-Example:
+Example::
+
# echo 1 > /sys/bus/mdev/devices/$mdev_UUID/remove
-Mediated device Hot plug:
+Mediated device Hot plug
------------------------
Mediated devices can be created and assigned at runtime. The procedure to hot
@@ -277,13 +286,13 @@ Translation APIs for Mediated Devices
=====================================
The following APIs are provided for translating user pfn to host pfn in a VFIO
-driver:
+driver::
-extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn);
+ extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage, int prot, unsigned long *phys_pfn);
-extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
- int npage);
+ extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage);
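+
+A hedged usage sketch of these calls from a vendor driver (assumes the
+mdev_dev() accessor and a single page at user virtual address vaddr;
+error handling trimmed)::
+
+	unsigned long user_pfn = vaddr >> PAGE_SHIFT;
+	unsigned long phys_pfn;
+	int ret;
+
+	/* pin one user page so the device can DMA to/from it */
+	ret = vfio_pin_pages(mdev_dev(mdev), &user_pfn, 1,
+			     IOMMU_READ | IOMMU_WRITE, &phys_pfn);
+	if (ret != 1)
+		return ret < 0 ? ret : -EFAULT;
+
+	/* ... program the device with phys_pfn << PAGE_SHIFT ... */
+
+	vfio_unpin_pages(mdev_dev(mdev), &user_pfn, 1);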
These functions call back into the back-end IOMMU module by using the pin_pages
and unpin_pages callbacks of the struct vfio_iommu_driver_ops[4]. Currently
@@ -304,81 +313,80 @@ card.
This step creates a dummy device, /sys/devices/virtual/mtty/mtty/
- Files in this device directory in sysfs are similar to the following:
-
- # tree /sys/devices/virtual/mtty/mtty/
- /sys/devices/virtual/mtty/mtty/
- |-- mdev_supported_types
- | |-- mtty-1
- | | |-- available_instances
- | | |-- create
- | | |-- device_api
- | | |-- devices
- | | `-- name
- | `-- mtty-2
- | |-- available_instances
- | |-- create
- | |-- device_api
- | |-- devices
- | `-- name
- |-- mtty_dev
- | `-- sample_mtty_dev
- |-- power
- | |-- autosuspend_delay_ms
- | |-- control
- | |-- runtime_active_time
- | |-- runtime_status
- | `-- runtime_suspended_time
- |-- subsystem -> ../../../../class/mtty
- `-- uevent
+ Files in this device directory in sysfs are similar to the following::
+
+ # tree /sys/devices/virtual/mtty/mtty/
+ /sys/devices/virtual/mtty/mtty/
+ |-- mdev_supported_types
+ | |-- mtty-1
+ | | |-- available_instances
+ | | |-- create
+ | | |-- device_api
+ | | |-- devices
+ | | `-- name
+ | `-- mtty-2
+ | |-- available_instances
+ | |-- create
+ | |-- device_api
+ | |-- devices
+ | `-- name
+ |-- mtty_dev
+ | `-- sample_mtty_dev
+ |-- power
+ | |-- autosuspend_delay_ms
+ | |-- control
+ | |-- runtime_active_time
+ | |-- runtime_status
+ | `-- runtime_suspended_time
+ |-- subsystem -> ../../../../class/mtty
+ `-- uevent
2. Create a mediated device by using the dummy device that you created in the
- previous step.
+ previous step::
- # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
+ # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
/sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create
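+
+   A fresh UUID can also be generated on the fly (assuming the uuidgen(1)
+   tool is available)::
+
+     # echo $(uuidgen) > \
+       /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create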
-3. Add parameters to qemu-kvm.
+3. Add parameters to qemu-kvm::
- -device vfio-pci,\
- sysfsdev=/sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001
+ -device vfio-pci,\
+ sysfsdev=/sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001
4. Boot the VM.
In the Linux guest VM, with no hardware on the host, the device appears
- as follows:
-
- # lspci -s 00:05.0 -xxvv
- 00:05.0 Serial controller: Device 4348:3253 (rev 10) (prog-if 02 [16550])
- Subsystem: Device 4348:3253
- Physical Slot: 5
- Control: I/O+ Mem- BusMaster- SpecCycle- MemWINV- VGASnoop- ParErr-
- Stepping- SERR- FastB2B- DisINTx-
- Status: Cap- 66MHz- UDF- FastB2B- ParErr- DEVSEL=medium >TAbort-
- <TAbort- <MAbort- >SERR- <PERR- INTx-
- Interrupt: pin A routed to IRQ 10
- Region 0: I/O ports at c150 [size=8]
- Region 1: I/O ports at c158 [size=8]
- Kernel driver in use: serial
- 00: 48 43 53 32 01 00 00 02 10 02 00 07 00 00 00 00
- 10: 51 c1 00 00 59 c1 00 00 00 00 00 00 00 00 00 00
- 20: 00 00 00 00 00 00 00 00 00 00 00 00 48 43 53 32
- 30: 00 00 00 00 00 00 00 00 00 00 00 00 0a 01 00 00
-
- In the Linux guest VM, dmesg output for the device is as follows:
-
- serial 0000:00:05.0: PCI INT A -> Link[LNKA] -> GSI 10 (level, high) -> IRQ
-10
- 0000:00:05.0: ttyS1 at I/O 0xc150 (irq = 10) is a 16550A
- 0000:00:05.0: ttyS2 at I/O 0xc158 (irq = 10) is a 16550A
-
-
-5. In the Linux guest VM, check the serial ports.
-
- # setserial -g /dev/ttyS*
- /dev/ttyS0, UART: 16550A, Port: 0x03f8, IRQ: 4
- /dev/ttyS1, UART: 16550A, Port: 0xc150, IRQ: 10
- /dev/ttyS2, UART: 16550A, Port: 0xc158, IRQ: 10
+ as follows::
+
+ # lspci -s 00:05.0 -xxvv
+ 00:05.0 Serial controller: Device 4348:3253 (rev 10) (prog-if 02 [16550])
+ Subsystem: Device 4348:3253
+ Physical Slot: 5
+ Control: I/O+ Mem- BusMaster- SpecCycle- MemWINV- VGASnoop- ParErr-
+ Stepping- SERR- FastB2B- DisINTx-
+ Status: Cap- 66MHz- UDF- FastB2B- ParErr- DEVSEL=medium >TAbort-
+ <TAbort- <MAbort- >SERR- <PERR- INTx-
+ Interrupt: pin A routed to IRQ 10
+ Region 0: I/O ports at c150 [size=8]
+ Region 1: I/O ports at c158 [size=8]
+ Kernel driver in use: serial
+ 00: 48 43 53 32 01 00 00 02 10 02 00 07 00 00 00 00
+ 10: 51 c1 00 00 59 c1 00 00 00 00 00 00 00 00 00 00
+ 20: 00 00 00 00 00 00 00 00 00 00 00 00 48 43 53 32
+ 30: 00 00 00 00 00 00 00 00 00 00 00 00 0a 01 00 00
+
+   In the Linux guest VM, dmesg output for the device is as follows::
+
+ serial 0000:00:05.0: PCI INT A -> Link[LNKA] -> GSI 10 (level, high) -> IRQ 10
+ 0000:00:05.0: ttyS1 at I/O 0xc150 (irq = 10) is a 16550A
+ 0000:00:05.0: ttyS2 at I/O 0xc158 (irq = 10) is a 16550A
+
+
+5. In the Linux guest VM, check the serial ports::
+
+ # setserial -g /dev/ttyS*
+ /dev/ttyS0, UART: 16550A, Port: 0x03f8, IRQ: 4
+ /dev/ttyS1, UART: 16550A, Port: 0xc150, IRQ: 10
+ /dev/ttyS2, UART: 16550A, Port: 0xc158, IRQ: 10
6. Using minicom or any terminal emulation program, open port /dev/ttyS1 or
/dev/ttyS2 with hardware flow control disabled.
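+
+   For instance (a hedged sketch; minicom's -D flag selects the device, and
+   hardware flow control is turned off in its settings menu)::
+
+     # minicom -D /dev/ttyS1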
@@ -388,14 +396,14 @@ card.
Data is looped back from the host's mtty driver.
-8. Destroy the mediated device that you created.
+8. Destroy the mediated device that you created::
- # echo 1 > /sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001/remove
+ # echo 1 > /sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001/remove
References
==========
-[1] See Documentation/vfio.txt for more information on VFIO.
-[2] struct mdev_driver in include/linux/mdev.h
-[3] struct mdev_parent_ops in include/linux/mdev.h
-[4] struct vfio_iommu_driver_ops in include/linux/vfio.h
+1. See Documentation/vfio.txt for more information on VFIO.
+2. struct mdev_driver in include/linux/mdev.h
+3. struct mdev_parent_ops in include/linux/mdev.h
+4. struct vfio_iommu_driver_ops in include/linux/vfio.h
diff --git a/Documentation/vfio.txt b/Documentation/vfio.txt
index 1dd3fddfd3a1..ef6a5111eaa1 100644
--- a/Documentation/vfio.txt
+++ b/Documentation/vfio.txt
@@ -1,5 +1,7 @@
-VFIO - "Virtual Function I/O"[1]
--------------------------------------------------------------------------------
+==================================
+VFIO - "Virtual Function I/O" [1]_
+==================================
+
Many modern systems now provide DMA and interrupt remapping facilities
to help ensure I/O devices behave within the boundaries they've been
allotted. This includes x86 hardware with AMD-Vi and Intel VT-d,
@@ -7,14 +9,14 @@ POWER systems with Partitionable Endpoints (PEs) and embedded PowerPC
systems such as Freescale PAMU. The VFIO driver is an IOMMU/device
agnostic framework for exposing direct device access to userspace, in
a secure, IOMMU protected environment. In other words, this allows
-safe[2], non-privileged, userspace drivers.
+safe [2]_, non-privileged, userspace drivers.
Why do we want that? Virtual machines often make use of direct device
access ("device assignment") when configured for the highest possible
I/O performance. From a device and host perspective, this simply
turns the VM into a userspace driver, with the benefits of
significantly reduced latency, higher bandwidth, and direct use of
-bare-metal device drivers[3].
+bare-metal device drivers [3]_.
Some applications, particularly in the high performance computing
field, also benefit from low-overhead, direct device access from
@@ -31,7 +33,7 @@ KVM PCI specific device assignment code as well as provide a more
secure, more featureful userspace driver environment than UIO.
Groups, Devices, and IOMMUs
--------------------------------------------------------------------------------
+---------------------------
Devices are the main target of any I/O driver. Devices typically
create a programming interface made up of I/O access, interrupts,
@@ -114,40 +116,40 @@ well as mechanisms for describing and registering interrupt
notifications.
VFIO Usage Example
--------------------------------------------------------------------------------
+------------------
-Assume user wants to access PCI device 0000:06:0d.0
+Assume the user wants to access PCI device 0000:06:0d.0::
-$ readlink /sys/bus/pci/devices/0000:06:0d.0/iommu_group
-../../../../kernel/iommu_groups/26
+ $ readlink /sys/bus/pci/devices/0000:06:0d.0/iommu_group
+ ../../../../kernel/iommu_groups/26
This device is therefore in IOMMU group 26. This device is on the
pci bus, therefore the user will make use of vfio-pci to manage the
-group:
+group::
-# modprobe vfio-pci
+ # modprobe vfio-pci
Binding this device to the vfio-pci driver creates the VFIO group
-character devices for this group:
+character devices for this group::
-$ lspci -n -s 0000:06:0d.0
-06:0d.0 0401: 1102:0002 (rev 08)
-# echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
-# echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id
+ $ lspci -n -s 0000:06:0d.0
+ 06:0d.0 0401: 1102:0002 (rev 08)
+ # echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
+ # echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id
Now we need to look at what other devices are in the group to free
-it for use by VFIO:
-
-$ ls -l /sys/bus/pci/devices/0000:06:0d.0/iommu_group/devices
-total 0
-lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:00:1e.0 ->
- ../../../../devices/pci0000:00/0000:00:1e.0
-lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:06:0d.0 ->
- ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.0
-lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:06:0d.1 ->
- ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.1
-
-This device is behind a PCIe-to-PCI bridge[4], therefore we also
+it for use by VFIO::
+
+ $ ls -l /sys/bus/pci/devices/0000:06:0d.0/iommu_group/devices
+ total 0
+ lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:00:1e.0 ->
+ ../../../../devices/pci0000:00/0000:00:1e.0
+ lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:06:0d.0 ->
+ ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.0
+ lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:06:0d.1 ->
+ ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.1
+
+This device is behind a PCIe-to-PCI bridge [4]_, therefore we also
need to add device 0000:06:0d.1 to the group following the same
procedure as above. Device 0000:00:1e.0 is a bridge that does
not currently have a host driver, therefore it's not required to
@@ -157,12 +159,12 @@ support PCI bridges).
The final step is to provide the user with access to the group if
unprivileged operation is desired (note that /dev/vfio/vfio provides
no capabilities on its own and is therefore expected to be set to
-mode 0666 by the system).
+mode 0666 by the system)::
-# chown user:user /dev/vfio/26
+ # chown user:user /dev/vfio/26
The user now has full access to all the devices and the iommu for this
-group and can access them as follows:
+group and can access them as follows::
int container, group, device, i;
struct vfio_group_status group_status =
@@ -248,31 +250,31 @@ VFIO bus driver API
VFIO bus drivers, such as vfio-pci make use of only a few interfaces
into VFIO core. When devices are bound and unbound to the driver,
the driver should call vfio_add_group_dev() and vfio_del_group_dev()
-respectively:
+respectively::
-extern int vfio_add_group_dev(struct iommu_group *iommu_group,
- struct device *dev,
- const struct vfio_device_ops *ops,
- void *device_data);
+ extern int vfio_add_group_dev(struct iommu_group *iommu_group,
+ struct device *dev,
+ const struct vfio_device_ops *ops,
+ void *device_data);
-extern void *vfio_del_group_dev(struct device *dev);
+ extern void *vfio_del_group_dev(struct device *dev);
vfio_add_group_dev() indicates to the core to begin tracking the
specified iommu_group and register the specified dev as owned by
a VFIO bus driver. The driver provides an ops structure for callbacks
-similar to a file operations structure:
-
-struct vfio_device_ops {
- int (*open)(void *device_data);
- void (*release)(void *device_data);
- ssize_t (*read)(void *device_data, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(void *device_data, const char __user *buf,
- size_t size, loff_t *ppos);
- long (*ioctl)(void *device_data, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(void *device_data, struct vm_area_struct *vma);
-};
+similar to a file operations structure::
+
+ struct vfio_device_ops {
+ int (*open)(void *device_data);
+ void (*release)(void *device_data);
+ ssize_t (*read)(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(void *device_data, const char __user *buf,
+ size_t size, loff_t *ppos);
+ long (*ioctl)(void *device_data, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(void *device_data, struct vm_area_struct *vma);
+ };
Each function is passed the device_data that was originally registered
in the vfio_add_group_dev() call above. This allows the bus driver
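+
+As a hedged illustration, a minimal bus driver might fill the structure in
+like this (the my_* names are placeholders; unused callbacks are simply left
+out)::
+
+	static int my_open(void *device_data)
+	{
+		/* device_data is what was passed to vfio_add_group_dev() */
+		return 0;
+	}
+
+	static const struct vfio_device_ops my_vfio_ops = {
+		.open = my_open,
+		/* .release, .read, .write, .ioctl, .mmap as needed */
+	};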
@@ -285,50 +287,55 @@ own VFIO_DEVICE_GET_REGION_INFO ioctl.
PPC64 sPAPR implementation note
--------------------------------------------------------------------------------
+-------------------------------
This implementation has some specifics:
1) On older systems (POWER7 with P5IOC2/IODA1) only one IOMMU group per
-container is supported as an IOMMU table is allocated at the boot time,
-one table per a IOMMU group which is a Partitionable Endpoint (PE)
-(PE is often a PCI domain but not always).
-Newer systems (POWER8 with IODA2) have improved hardware design which allows
-to remove this limitation and have multiple IOMMU groups per a VFIO container.
+   container is supported, as an IOMMU table is allocated at boot time,
+   one table per IOMMU group, which is a Partitionable Endpoint (PE)
+   (a PE is often a PCI domain, but not always).
+
+   Newer systems (POWER8 with IODA2) have an improved hardware design that
+   removes this limitation and allows multiple IOMMU groups per VFIO
+   container.
2) The hardware supports so called DMA windows - the PCI address range
-within which DMA transfer is allowed, any attempt to access address space
-out of the window leads to the whole PE isolation.
+   within which DMA transfers are allowed; any attempt to access address
+   space outside the window leads to isolation of the whole PE.
3) PPC64 guests are paravirtualized but not fully emulated. There is an API
-to map/unmap pages for DMA, and it normally maps 1..32 pages per call and
-currently there is no way to reduce the number of calls. In order to make things
-faster, the map/unmap handling has been implemented in real mode which provides
-an excellent performance which has limitations such as inability to do
-locked pages accounting in real time.
+   to map/unmap pages for DMA, and it normally maps 1..32 pages per call;
+   currently there is no way to reduce the number of calls. In order to make
+   things faster, the map/unmap handling has been implemented in real mode,
+   which provides excellent performance but has limitations, such as the
+   inability to do locked pages accounting in real time.
4) According to the sPAPR specification, a Partitionable Endpoint (PE) is an I/O
-subtree that can be treated as a unit for the purposes of partitioning and
-error recovery. A PE may be a single or multi-function IOA (IO Adapter), a
-function of a multi-function IOA, or multiple IOAs (possibly including switch
-and bridge structures above the multiple IOAs). PPC64 guests detect PCI errors
-and recover from them via EEH RTAS services, which works on the basis of
-additional ioctl commands.
+ subtree that can be treated as a unit for the purposes of partitioning and
+ error recovery. A PE may be a single or multi-function IOA (IO Adapter), a
+ function of a multi-function IOA, or multiple IOAs (possibly including
+ switch and bridge structures above the multiple IOAs). PPC64 guests detect
+   PCI errors and recover from them via EEH RTAS services, which work on
+   the basis of additional ioctl commands.
-So 4 additional ioctls have been added:
+ So 4 additional ioctls have been added:
- VFIO_IOMMU_SPAPR_TCE_GET_INFO - returns the size and the start
- of the DMA window on the PCI bus.
+ VFIO_IOMMU_SPAPR_TCE_GET_INFO
+ returns the size and the start of the DMA window on the PCI bus.
- VFIO_IOMMU_ENABLE - enables the container. The locked pages accounting
+ VFIO_IOMMU_ENABLE
+ enables the container. The locked pages accounting
     is done at this point. This lets the user first find out what
     the DMA window is and adjust the rlimit before doing any real work.
- VFIO_IOMMU_DISABLE - disables the container.
+ VFIO_IOMMU_DISABLE
+ disables the container.
- VFIO_EEH_PE_OP - provides an API for EEH setup, error detection and recovery.
+ VFIO_EEH_PE_OP
+ provides an API for EEH setup, error detection and recovery.
-The code flow from the example above should be slightly changed:
+ The code flow from the example above should be slightly changed::
struct vfio_eeh_pe_op pe_op = { .argsz = sizeof(pe_op), .flags = 0 };
@@ -442,73 +449,73 @@ The code flow from the example above should be slightly changed:
....
5) There is v2 of SPAPR TCE IOMMU. It deprecates VFIO_IOMMU_ENABLE/
-VFIO_IOMMU_DISABLE and implements 2 new ioctls:
-VFIO_IOMMU_SPAPR_REGISTER_MEMORY and VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY
-(which are unsupported in v1 IOMMU).
+ VFIO_IOMMU_DISABLE and implements 2 new ioctls:
+ VFIO_IOMMU_SPAPR_REGISTER_MEMORY and VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY
+ (which are unsupported in v1 IOMMU).
-PPC64 paravirtualized guests generate a lot of map/unmap requests,
-and the handling of those includes pinning/unpinning pages and updating
-mm::locked_vm counter to make sure we do not exceed the rlimit.
-The v2 IOMMU splits accounting and pinning into separate operations:
+ PPC64 paravirtualized guests generate a lot of map/unmap requests,
+ and the handling of those includes pinning/unpinning pages and updating
+ mm::locked_vm counter to make sure we do not exceed the rlimit.
+ The v2 IOMMU splits accounting and pinning into separate operations:
-- VFIO_IOMMU_SPAPR_REGISTER_MEMORY/VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY ioctls
-receive a user space address and size of the block to be pinned.
-Bisecting is not supported and VFIO_IOMMU_UNREGISTER_MEMORY is expected to
-be called with the exact address and size used for registering
-the memory block. The userspace is not expected to call these often.
-The ranges are stored in a linked list in a VFIO container.
+ - VFIO_IOMMU_SPAPR_REGISTER_MEMORY/VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY ioctls
+ receive a user space address and size of the block to be pinned.
+ Bisecting is not supported and VFIO_IOMMU_UNREGISTER_MEMORY is expected to
+ be called with the exact address and size used for registering
+     the memory block. Userspace is not expected to call these often.
+ The ranges are stored in a linked list in a VFIO container.
-- VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA ioctls only update the actual
-IOMMU table and do not do pinning; instead these check that the userspace
-address is from pre-registered range.
+   - VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA ioctls only update the actual
+     IOMMU table and do not do pinning; instead they check that the userspace
+     address is from a pre-registered range.
-This separation helps in optimizing DMA for guests.
+ This separation helps in optimizing DMA for guests.
6) sPAPR specification allows guests to have an additional DMA window(s) on
-a PCI bus with a variable page size. Two ioctls have been added to support
-this: VFIO_IOMMU_SPAPR_TCE_CREATE and VFIO_IOMMU_SPAPR_TCE_REMOVE.
-The platform has to support the functionality or error will be returned to
-the userspace. The existing hardware supports up to 2 DMA windows, one is
-2GB long, uses 4K pages and called "default 32bit window"; the other can
-be as big as entire RAM, use different page size, it is optional - guests
-create those in run-time if the guest driver supports 64bit DMA.
-
-VFIO_IOMMU_SPAPR_TCE_CREATE receives a page shift, a DMA window size and
-a number of TCE table levels (if a TCE table is going to be big enough and
-the kernel may not be able to allocate enough of physically contiguous memory).
-It creates a new window in the available slot and returns the bus address where
-the new window starts. Due to hardware limitation, the user space cannot choose
-the location of DMA windows.
-
-VFIO_IOMMU_SPAPR_TCE_REMOVE receives the bus start address of the window
-and removes it.
+   a PCI bus with a variable page size. Two ioctls have been added to support
+   this: VFIO_IOMMU_SPAPR_TCE_CREATE and VFIO_IOMMU_SPAPR_TCE_REMOVE.
+   The platform has to support the functionality or an error will be returned
+   to userspace. The existing hardware supports up to 2 DMA windows: one is
+   2GB long, uses 4K pages and is called the "default 32bit window"; the other
+   can be as big as the entire RAM, uses a different page size, and is
+   optional - guests create it at run time if the guest driver supports
+   64bit DMA.
+
+   VFIO_IOMMU_SPAPR_TCE_CREATE receives a page shift, a DMA window size and
+   a number of TCE table levels (used when a TCE table is going to be so big
+   that the kernel may not be able to allocate enough physically contiguous
+   memory). It creates a new window in the available slot and returns the bus
+   address where the new window starts. Due to hardware limitations, userspace
+   cannot choose the location of DMA windows (see the sketch after this list).
+
+ VFIO_IOMMU_SPAPR_TCE_REMOVE receives the bus start address of the window
+ and removes it.
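+
+   A hedged sketch of the flow described in items 5) and 6) (container file
+   descriptor set up as in the earlier example; buffer setup and error
+   handling omitted)::
+
+	struct vfio_iommu_spapr_register_memory reg = {
+		.argsz = sizeof(reg),
+		.vaddr = (__u64)(uintptr_t)buf,
+		.size  = bufsize,
+	};
+	/* pin and account the DMA memory once, up front */
+	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+
+	struct vfio_iommu_spapr_tce_create create = {
+		.argsz       = sizeof(create),
+		.page_shift  = 16,		/* 64K pages */
+		.window_size = 2ULL << 30,	/* 2GB window */
+		.levels      = 1,
+	};
+	/* create an additional window; its bus address comes back */
+	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+
+	/* ... VFIO_IOMMU_MAP_DMA within the new window ... */
+
+	struct vfio_iommu_spapr_tce_remove remove = {
+		.argsz      = sizeof(remove),
+		.start_addr = create.start_addr,
+	};
+	ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);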
-------------------------------------------------------------------------------
-[1] VFIO was originally an acronym for "Virtual Function I/O" in its
-initial implementation by Tom Lyon while as Cisco. We've since
-outgrown the acronym, but it's catchy.
-
-[2] "safe" also depends upon a device being "well behaved". It's
-possible for multi-function devices to have backdoors between
-functions and even for single function devices to have alternative
-access to things like PCI config space through MMIO registers. To
-guard against the former we can include additional precautions in the
-IOMMU driver to group multi-function PCI devices together
-(iommu=group_mf). The latter we can't prevent, but the IOMMU should
-still provide isolation. For PCI, SR-IOV Virtual Functions are the
-best indicator of "well behaved", as these are designed for
-virtualization usage models.
-
-[3] As always there are trade-offs to virtual machine device
-assignment that are beyond the scope of VFIO. It's expected that
-future IOMMU technologies will reduce some, but maybe not all, of
-these trade-offs.
-
-[4] In this case the device is below a PCI bridge, so transactions
-from either function of the device are indistinguishable to the iommu:
-
--[0000:00]-+-1e.0-[06]--+-0d.0
- \-0d.1
-
-00:1e.0 PCI bridge: Intel Corporation 82801 PCI Bridge (rev 90)
+.. [1] VFIO was originally an acronym for "Virtual Function I/O" in its
+   initial implementation by Tom Lyon while at Cisco. We've since
+ outgrown the acronym, but it's catchy.
+
+.. [2] "safe" also depends upon a device being "well behaved". It's
+ possible for multi-function devices to have backdoors between
+ functions and even for single function devices to have alternative
+ access to things like PCI config space through MMIO registers. To
+ guard against the former we can include additional precautions in the
+ IOMMU driver to group multi-function PCI devices together
+ (iommu=group_mf). The latter we can't prevent, but the IOMMU should
+ still provide isolation. For PCI, SR-IOV Virtual Functions are the
+ best indicator of "well behaved", as these are designed for
+ virtualization usage models.
+
+.. [3] As always there are trade-offs to virtual machine device
+ assignment that are beyond the scope of VFIO. It's expected that
+ future IOMMU technologies will reduce some, but maybe not all, of
+ these trade-offs.
+
+.. [4] In this case the device is below a PCI bridge, so transactions
+ from either function of the device are indistinguishable to the iommu::
+
+ -[0000:00]-+-1e.0-[06]--+-0d.0
+ \-0d.1
+
+ 00:1e.0 PCI bridge: Intel Corporation 82801 PCI Bridge (rev 90)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3a9831b72945..e63a35fafef0 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4329,3 +4329,21 @@ Querying this capability returns a bitmap indicating the possible
virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
(counting from the right) is set, then a virtual SMT mode of 2^N is
available.
+
+8.11 KVM_CAP_HYPERV_SYNIC2
+
+Architectures: x86
+
+This capability enables a newer version of the Hyper-V Synthetic interrupt
+controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
+doesn't clear SynIC message and event flags pages when they are enabled by
+writing to the respective MSRs.
+
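+A hedged sketch of enabling it from userspace (assuming, as with
+KVM_CAP_HYPERV_SYNIC, that the capability is enabled per vcpu via
+KVM_ENABLE_CAP; error handling omitted):
+
+	struct kvm_enable_cap cap = {
+		.cap = KVM_CAP_HYPERV_SYNIC2,
+	};
+
+	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
+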
+8.12 KVM_CAP_HYPERV_VP_INDEX
+
+Architectures: x86
+
+This capability indicates that userspace can load the HV_X64_MSR_VP_INDEX MSR.
+Its value is used to denote the target vcpu for a SynIC interrupt. For
+compatibility, KVM initializes this MSR to KVM's internal vcpu index. When this
+capability is absent, userspace can still query this MSR's value.
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 0a9ea515512a..1ebecc115dc6 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -166,10 +166,11 @@ MSR_KVM_SYSTEM_TIME: 0x12
MSR_KVM_ASYNC_PF_EN: 0x4b564d02
data: Bits 63-6 hold 64-byte aligned physical address of a
64 byte memory area which must be in guest RAM and must be
- zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
+ zeroed. Bits 5-3 are reserved and should be zero. Bit 0 is 1
	when asynchronous page faults are enabled on the vcpu, 0 when
disabled. Bit 1 is 1 if asynchronous page faults can be injected
- when vcpu is in cpl == 0.
+ when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
+ are delivered to L1 as #PF vmexits.
First 4 byte of 64 byte memory location will be written to by
the hypervisor at the time of asynchronous page fault (APF)
diff --git a/Documentation/xillybus.txt b/Documentation/xillybus.txt
index 1660145b9969..2446ee303c09 100644
--- a/Documentation/xillybus.txt
+++ b/Documentation/xillybus.txt
@@ -1,12 +1,11 @@
+==========================================
+Xillybus driver for generic FPGA interface
+==========================================
- ==========================================
- Xillybus driver for generic FPGA interface
- ==========================================
+:Author: Eli Billauer, Xillybus Ltd. (http://xillybus.com)
+:Email: eli.billauer@gmail.com or as advertised on Xillybus' site.
-Author: Eli Billauer, Xillybus Ltd. (http://xillybus.com)
-Email: eli.billauer@gmail.com or as advertised on Xillybus' site.
-
-Contents:
+.. Contents:
- Introduction
-- Background
@@ -17,7 +16,7 @@ Contents:
-- Synchronization
-- Seekable pipes
-- Internals
+ - Internals
-- Source code organization
-- Pipe attributes
-- Host never reads from the FPGA
@@ -29,7 +28,7 @@ Contents:
-- The "nonempty" message (supporting poll)
-INTRODUCTION
+Introduction
============
Background
@@ -105,7 +104,7 @@ driver is used to work out of the box with any Xillybus IP core.
The data structure just mentioned should not be confused with PCI's
configuration space or the Flattened Device Tree.
-USAGE
+Usage
=====
User interface
@@ -117,11 +116,11 @@ names of these files depend on the IP core that is loaded in the FPGA (see
Probing below). To communicate with the FPGA, open the device file that
corresponds to the hardware FIFO you want to send data or receive data from,
and use plain write() or read() calls, just like with a regular pipe. In
-particular, it makes perfect sense to go:
+particular, it makes perfect sense to go::
-$ cat mydata > /dev/xillybus_thisfifo
+ $ cat mydata > /dev/xillybus_thisfifo
-$ cat /dev/xillybus_thatfifo > hisdata
+ $ cat /dev/xillybus_thatfifo > hisdata
possibly pressing CTRL-C at some stage, even though the xillybus_* pipes have
the capability to send an EOF (but may not use it).
@@ -178,7 +177,7 @@ the attached memory is done by seeking to the desired address, and calling
read() or write() as required.
-INTERNALS
+Internals
=========
Source code organization
@@ -365,7 +364,7 @@ into that page. It can be shown that all pages requested from the kernel
(except possibly for the last) are 100% utilized this way.
The "nonempty" message (supporting poll)
----------------------------------------
+----------------------------------------
In order to support the "poll" method (and hence select() ), there is a small
catch regarding the FPGA to host direction: The FPGA may have filled a DMA
diff --git a/Documentation/xz.txt b/Documentation/xz.txt
index 2cf3e2608de3..b2220d03aa50 100644
--- a/Documentation/xz.txt
+++ b/Documentation/xz.txt
@@ -1,121 +1,127 @@
-
+============================
XZ data compression in Linux
============================
Introduction
+============
- XZ is a general purpose data compression format with high compression
- ratio and relatively fast decompression. The primary compression
- algorithm (filter) is LZMA2. Additional filters can be used to improve
- compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
- improve compression ratio of executable data.
+XZ is a general purpose data compression format with high compression
+ratio and relatively fast decompression. The primary compression
+algorithm (filter) is LZMA2. Additional filters can be used to improve
+compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
+improve compression ratio of executable data.
- The XZ decompressor in Linux is called XZ Embedded. It supports
- the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
- for integrity checking. The home page of XZ Embedded is at
- <http://tukaani.org/xz/embedded.html>, where you can find the
- latest version and also information about using the code outside
- the Linux kernel.
+The XZ decompressor in Linux is called XZ Embedded. It supports
+the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
+for integrity checking. The home page of XZ Embedded is at
+<http://tukaani.org/xz/embedded.html>, where you can find the
+latest version and also information about using the code outside
+the Linux kernel.
- For userspace, XZ Utils provide a zlib-like compression library
- and a gzip-like command line tool. XZ Utils can be downloaded from
- <http://tukaani.org/xz/>.
+For userspace, XZ Utils provide a zlib-like compression library
+and a gzip-like command line tool. XZ Utils can be downloaded from
+<http://tukaani.org/xz/>.
XZ related components in the kernel
-
- The xz_dec module provides XZ decompressor with single-call (buffer
- to buffer) and multi-call (stateful) APIs. The usage of the xz_dec
- module is documented in include/linux/xz.h.
-
- The xz_dec_test module is for testing xz_dec. xz_dec_test is not
- useful unless you are hacking the XZ decompressor. xz_dec_test
- allocates a char device major dynamically to which one can write
- .xz files from userspace. The decompressed output is thrown away.
- Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
- See the xz_dec_test source code for the details.
-
- For decompressing the kernel image, initramfs, and initrd, there
- is a wrapper function in lib/decompress_unxz.c. Its API is the
- same as in other decompress_*.c files, which is defined in
- include/linux/decompress/generic.h.
-
- scripts/xz_wrap.sh is a wrapper for the xz command line tool found
- from XZ Utils. The wrapper sets compression options to values suitable
- for compressing the kernel image.
-
- For kernel makefiles, two commands are provided for use with
- $(call if_needed). The kernel image should be compressed with
- $(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2
- dictionary. It will also append a four-byte trailer containing the
- uncompressed size of the file, which is needed by the boot code.
- Other things should be compressed with $(call if_needed,xzmisc)
- which will use no BCJ filter and 1 MiB LZMA2 dictionary.
+===================================
+
+The xz_dec module provides an XZ decompressor with single-call (buffer
+to buffer) and multi-call (stateful) APIs. The usage of the xz_dec
+module is documented in include/linux/xz.h.
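+
+A hedged single-call sketch of that API (buffer setup elided; see
+include/linux/xz.h for the authoritative details)::
+
+	#include <linux/xz.h>
+
+	struct xz_buf b = {
+		.in = in_buf,   .in_size = in_len,
+		.out = out_buf, .out_size = out_len,
+	};
+	struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0);
+	enum xz_ret ret = xz_dec_run(s, &b);	/* XZ_STREAM_END on success */
+
+	xz_dec_end(s);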
+
+The xz_dec_test module is for testing xz_dec. xz_dec_test is not
+useful unless you are hacking the XZ decompressor. xz_dec_test
+allocates a char device major dynamically to which one can write
+.xz files from userspace. The decompressed output is thrown away.
+Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
+See the xz_dec_test source code for the details.
+
+For decompressing the kernel image, initramfs, and initrd, there
+is a wrapper function in lib/decompress_unxz.c. Its API is the
+same as in other decompress_*.c files, which is defined in
+include/linux/decompress/generic.h.
+
+scripts/xz_wrap.sh is a wrapper for the xz command line tool found
+in XZ Utils. The wrapper sets compression options to values suitable
+for compressing the kernel image.
+
+For kernel makefiles, two commands are provided for use with
+$(call if_changed). The kernel image should be compressed with
+$(call if_changed,xzkern), which will use a BCJ filter and a big LZMA2
+dictionary. It will also append a four-byte trailer containing the
+uncompressed size of the file, which is needed by the boot code.
+Other things should be compressed with $(call if_changed,xzmisc),
+which will use no BCJ filter and a 1 MiB LZMA2 dictionary.
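+
+For instance, a boot Makefile might hook this up roughly as follows (a
+hedged sketch; the $(obj)/ paths are illustrative)::
+
+	$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+		$(call if_changed,xzkern)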
Notes on compression options
+============================
- Since the XZ Embedded supports only streams with no integrity check or
- CRC32, make sure that you don't use some other integrity check type
- when encoding files that are supposed to be decoded by the kernel. With
- liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
- when encoding. With the xz command line tool, use --check=none or
- --check=crc32.
-
- Using CRC32 is strongly recommended unless there is some other layer
- which will verify the integrity of the uncompressed data anyway.
- Double checking the integrity would probably be waste of CPU cycles.
- Note that the headers will always have a CRC32 which will be validated
- by the decoder; you can only change the integrity check type (or
- disable it) for the actual uncompressed data.
-
- In userspace, LZMA2 is typically used with dictionary sizes of several
- megabytes. The decoder needs to have the dictionary in RAM, thus big
- dictionaries cannot be used for files that are intended to be decoded
- by the kernel. 1 MiB is probably the maximum reasonable dictionary
- size for in-kernel use (maybe more is OK for initramfs). The presets
- in XZ Utils may not be optimal when creating files for the kernel,
- so don't hesitate to use custom settings. Example:
-
- xz --check=crc32 --lzma2=dict=512KiB inputfile
-
- An exception to above dictionary size limitation is when the decoder
- is used in single-call mode. Decompressing the kernel itself is an
- example of this situation. In single-call mode, the memory usage
- doesn't depend on the dictionary size, and it is perfectly fine to
- use a big dictionary: for maximum compression, the dictionary should
- be at least as big as the uncompressed data itself.
+Since XZ Embedded supports only streams with no integrity check or
+CRC32, make sure that you don't use some other integrity check type
+when encoding files that are supposed to be decoded by the kernel. With
+liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
+when encoding. With the xz command line tool, use --check=none or
+--check=crc32.
+
+Using CRC32 is strongly recommended unless there is some other layer
+which will verify the integrity of the uncompressed data anyway.
+Double checking the integrity would probably be a waste of CPU cycles.
+Note that the headers will always have a CRC32 which will be validated
+by the decoder; you can only change the integrity check type (or
+disable it) for the actual uncompressed data.
+
+In userspace, LZMA2 is typically used with dictionary sizes of several
+megabytes. The decoder needs to have the dictionary in RAM, thus big
+dictionaries cannot be used for files that are intended to be decoded
+by the kernel. 1 MiB is probably the maximum reasonable dictionary
+size for in-kernel use (maybe more is OK for initramfs). The presets
+in XZ Utils may not be optimal when creating files for the kernel,
+so don't hesitate to use custom settings. Example::
+
+ xz --check=crc32 --lzma2=dict=512KiB inputfile
+
+An exception to the above dictionary size limitation is when the decoder
+is used in single-call mode. Decompressing the kernel itself is an
+example of this situation. In single-call mode, the memory usage
+doesn't depend on the dictionary size, and it is perfectly fine to
+use a big dictionary: for maximum compression, the dictionary should
+be at least as big as the uncompressed data itself.
Future plans
+============
- Creating a limited XZ encoder may be considered if people think it is
- useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
- the fastest settings, so it isn't clear if LZMA2 encoder is wanted
- into the kernel.
+Creating a limited XZ encoder may be considered if people think it is
+useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
+the fastest settings, so it isn't clear if an LZMA2 encoder is wanted
+in the kernel.
- Support for limited random-access reading is planned for the
- decompression code. I don't know if it could have any use in the
- kernel, but I know that it would be useful in some embedded projects
- outside the Linux kernel.
+Support for limited random-access reading is planned for the
+decompression code. I don't know if it could have any use in the
+kernel, but I know that it would be useful in some embedded projects
+outside the Linux kernel.
Conformance to the .xz file format specification
+================================================
- There are a couple of corner cases where things have been simplified
- at expense of detecting errors as early as possible. These should not
- matter in practice all, since they don't cause security issues. But
- it is good to know this if testing the code e.g. with the test files
- from XZ Utils.
+There are a couple of corner cases where things have been simplified
+at the expense of detecting errors as early as possible. These should not
+matter in practice at all, since they don't cause security issues. But
+it is good to know this if testing the code e.g. with the test files
+from XZ Utils.
Reporting bugs
+==============
- Before reporting a bug, please check that it's not fixed already
- at upstream. See <http://tukaani.org/xz/embedded.html> to get the
- latest code.
+Before reporting a bug, please check that it's not fixed already
+at upstream. See <http://tukaani.org/xz/embedded.html> to get the
+latest code.
- Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
- Freenode and talk to Larhzu. I don't actively read LKML or other
- kernel-related mailing lists, so if there's something I should know,
- you should email to me personally or use IRC.
+Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
+Freenode and talk to Larhzu. I don't actively read LKML or other
+kernel-related mailing lists, so if there's something I should know,
+you should email me personally or use IRC.
- Don't bother Igor Pavlov with questions about the XZ implementation
- in the kernel or about XZ Utils. While these two implementations
- include essential code that is directly based on Igor Pavlov's code,
- these implementations aren't maintained nor supported by him.
+Don't bother Igor Pavlov with questions about the XZ implementation
+in the kernel or about XZ Utils. While these two implementations
+include essential code that is directly based on Igor Pavlov's code,
+these implementations aren't maintained or supported by him.
diff --git a/Documentation/zorro.txt b/Documentation/zorro.txt
index d530971beb00..664072b017e3 100644
--- a/Documentation/zorro.txt
+++ b/Documentation/zorro.txt
@@ -1,12 +1,13 @@
- Writing Device Drivers for Zorro Devices
- ----------------------------------------
+========================================
+Writing Device Drivers for Zorro Devices
+========================================
-Written by Geert Uytterhoeven <geert@linux-m68k.org>
-Last revised: September 5, 2003
+:Author: Geert Uytterhoeven <geert@linux-m68k.org>
+:Last revised: September 5, 2003
-1. Introduction
----------------
+Introduction
+------------
The Zorro bus is the bus used in the Amiga family of computers. Thanks to
AutoConfig(tm), it's 100% Plug-and-Play.
@@ -20,12 +21,12 @@ There are two types of Zorro buses, Zorro II and Zorro III:
with Zorro II. The Zorro III address space lies outside the first 16 MB.
-2. Probing for Zorro Devices
-----------------------------
+Probing for Zorro Devices
+-------------------------
-Zorro devices are found by calling `zorro_find_device()', which returns a
-pointer to the `next' Zorro device with the specified Zorro ID. A probe loop
-for the board with Zorro ID `ZORRO_PROD_xxx' looks like:
+Zorro devices are found by calling ``zorro_find_device()``, which returns a
+pointer to the ``next`` Zorro device with the specified Zorro ID. A probe loop
+for the board with Zorro ID ``ZORRO_PROD_xxx`` looks like::
struct zorro_dev *z = NULL;
@@ -35,8 +36,8 @@ for the board with Zorro ID `ZORRO_PROD_xxx' looks like:
...
}
-`ZORRO_WILDCARD' acts as a wildcard and finds any Zorro device. If your driver
-supports different types of boards, you can use a construct like:
+``ZORRO_WILDCARD`` acts as a wildcard and finds any Zorro device. If your driver
+supports different types of boards, you can use a construct like::
struct zorro_dev *z = NULL;
@@ -49,24 +50,24 @@ supports different types of boards, you can use a construct like:
}
-3. Zorro Resources
-------------------
+Zorro Resources
+---------------
Before you can access a Zorro device's registers, you have to make sure it's
not yet in use. This is done using the I/O memory space resource management
-functions:
+functions::
request_mem_region()
release_mem_region()
-Shortcuts to claim the whole device's address space are provided as well:
+Shortcuts to claim the whole device's address space are provided as well::
zorro_request_device
zorro_release_device
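+
+A hedged probe-path sketch ("mydriver" is an illustrative name; the request
+call returns NULL when the region is already claimed)::
+
+	if (!zorro_request_device(z, "mydriver"))
+		return -EBUSY;
+	/* ... use the board ... */
+	zorro_release_device(z);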
-4. Accessing the Zorro Address Space
-------------------------------------
+Accessing the Zorro Address Space
+---------------------------------
The address regions in the Zorro device resources are Zorro bus address
regions. Due to the identity bus-physical address mapping on the Zorro bus,
@@ -78,26 +79,26 @@ The treatment of these regions depends on the type of Zorro space:
explicitly using z_ioremap().
Conversion from bus/physical Zorro II addresses to kernel virtual addresses
- and vice versa is done using:
+ and vice versa is done using::
virt_addr = ZTWO_VADDR(bus_addr);
bus_addr = ZTWO_PADDR(virt_addr);
- Zorro III address space must be mapped explicitly using z_ioremap() first
- before it can be accessed:
+ before it can be accessed::
virt_addr = z_ioremap(bus_addr, size);
...
z_iounmap(virt_addr);
-5. References
--------------
+References
+----------
-linux/include/linux/zorro.h
-linux/include/uapi/linux/zorro.h
-linux/include/uapi/linux/zorro_ids.h
-linux/arch/m68k/include/asm/zorro.h
-linux/drivers/zorro
-/proc/bus/zorro
+#. linux/include/linux/zorro.h
+#. linux/include/uapi/linux/zorro.h
+#. linux/include/uapi/linux/zorro_ids.h
+#. linux/arch/m68k/include/asm/zorro.h
+#. linux/drivers/zorro
+#. /proc/bus/zorro
diff --git a/MAINTAINERS b/MAINTAINERS
index 1347726cf9d5..297e610c9163 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1255,7 +1255,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/ulli-kroll/linux.git
S: Maintained
F: arch/arm/mach-gemini/
-F: drivers/rtc/rtc-gemini.c
+F: drivers/rtc/rtc-ftrtc010.c
ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
M: Barry Song <baohua@kernel.org>
@@ -2516,10 +2516,10 @@ S: Supported
F: drivers/media/platform/sti/delta
BEFS FILE SYSTEM
-M: Luis de Bethencourt <luisbg@osg.samsung.com>
+M: Luis de Bethencourt <luisbg@kernel.org>
M: Salah Triki <salah.triki@gmail.com>
S: Maintained
-T: git git://github.com/luisbg/linux-befs.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/luisbg/linux-befs.git
F: Documentation/filesystems/befs.txt
F: fs/befs/
@@ -3974,6 +3974,12 @@ M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
F: drivers/platform/x86/dell-wmi.c
+DENALI NAND DRIVER
+M: Masahiro Yamada <yamada.masahiro@socionext.com>
+L: linux-mtd@lists.infradead.org
+S: Supported
+F: drivers/mtd/nand/denali*
+
DESIGNWARE USB2 DRD IP DRIVER
M: John Youn <johnyoun@synopsys.com>
L: linux-usb@vger.kernel.org
@@ -7548,6 +7554,15 @@ F: include/linux/kmemleak.h
F: mm/kmemleak.c
F: mm/kmemleak-test.c
+KMOD MODULE USERMODE HELPER
+M: "Luis R. Rodriguez" <mcgrof@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: kernel/kmod.c
+F: include/linux/kmod.h
+F: lib/test_kmod.c
+F: tools/testing/selftests/kmod/
+
KPROBES
M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
@@ -7715,6 +7730,7 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
S: Supported
F: drivers/nvdimm/*
+F: drivers/acpi/nfit/*
F: include/linux/nd.h
F: include/linux/libnvdimm.h
F: include/uapi/linux/ndctl.h
@@ -7726,7 +7742,6 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/blk.c
F: drivers/nvdimm/region_devs.c
-F: drivers/acpi/nfit*
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
@@ -8717,6 +8732,12 @@ F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/
F: arch/mips/
+MIPS GENERIC PLATFORM
+M: Paul Burton <paul.burton@imgtec.com>
+L: linux-mips@linux-mips.org
+S: Supported
+F: arch/mips/generic/
+
MIPS/LOONGSON1 ARCHITECTURE
M: Keguang Zhang <keguang.zhang@gmail.com>
L: linux-mips@linux-mips.org
@@ -8726,6 +8747,16 @@ F: arch/mips/include/asm/mach-loongson32/
F: drivers/*/*loongson1*
F: drivers/*/*/*loongson1*
+MIPS BOSTON DEVELOPMENT BOARD
+M: Paul Burton <paul.burton@imgtec.com>
+L: linux-mips@linux-mips.org
+S: Maintained
+F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
+F: arch/mips/boot/dts/img/boston.dts
+F: arch/mips/configs/generic/board-boston.config
+F: drivers/clk/imgtec/clk-boston.c
+F: include/dt-bindings/clock/boston-clock.h
+
MIROSOUND PCM20 FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -9375,6 +9406,12 @@ F: include/linux/ntb.h
F: include/linux/ntb_transport.h
F: tools/testing/selftests/ntb/
+NTB IDT DRIVER
+M: Serge Semin <fancer.lancer@gmail.com>
+L: linux-ntb@googlegroups.com
+S: Supported
+F: drivers/ntb/hw/idt/
+
NTB INTEL DRIVER
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
@@ -10831,6 +10868,14 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/qedf/
+QLOGIC QL4xxx RDMA DRIVER
+M: Ram Amrani <Ram.Amrani@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/hw/qedr/
+F: include/uapi/rdma/qedr-abi.h
+
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/
@@ -12464,7 +12509,8 @@ M: Marek Vasut <marek.vasut@gmail.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
-T: git git://github.com/spi-nor/linux.git
+T: git git://git.infradead.org/linux-mtd.git spi-nor/fixes
+T: git git://git.infradead.org/l2-mtd.git spi-nor/next
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
diff --git a/Makefile b/Makefile
index 547947ff87de..b4fb9a1d1594 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
-PATCHLEVEL = 12
+PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -294,10 +294,17 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+
HOSTCC = gcc
HOSTCXX = g++
-HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
+ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS)
+HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
+HOSTLDFLAGS := $(HOST_LFS_LDFLAGS)
+HOST_LOADLIBES := $(HOST_LFS_LIBS)
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
@@ -408,7 +415,7 @@ KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(S
export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP
+export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES
export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
@@ -456,10 +463,11 @@ ifneq ($(KBUILD_SRC),)
endif
# Support for using generic headers in asm-generic
-PHONY += asm-generic
-asm-generic:
+PHONY += asm-generic uapi-asm-generic
+asm-generic: uapi-asm-generic
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
src=asm obj=arch/$(SRCARCH)/include/generated/asm
+uapi-asm-generic:
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm
@@ -1136,7 +1144,7 @@ firmware_install:
#Default location for installed headers
export INSTALL_HDR_PATH = $(objtree)/usr
-# If we do an all arch process set dst to asm-$(hdr-arch)
+# If we do an all arch process set dst to include/arch-$(hdr-arch)
hdr-dst = $(if $(KBUILD_HEADERS), dst=include/arch-$(hdr-arch), dst=include)
PHONY += archheaders
@@ -1146,7 +1154,7 @@ PHONY += archscripts
archscripts:
PHONY += __headers
-__headers: $(version_h) scripts_basic asm-generic archheaders archscripts
+__headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
$(Q)$(MAKE) $(build)=scripts build_unifdef
PHONY += headers_install_all
@@ -1157,7 +1165,7 @@ PHONY += headers_install
headers_install: __headers
$(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
$(error Headers not exportable for the $(SRCARCH) architecture))
- $(Q)$(MAKE) $(hdr-inst)=include/uapi
+ $(Q)$(MAKE) $(hdr-inst)=include/uapi dst=include
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst)
PHONY += headers_check_all
@@ -1166,7 +1174,7 @@ headers_check_all: headers_install_all
PHONY += headers_check
headers_check: headers_install
- $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
+ $(Q)$(MAKE) $(hdr-inst)=include/uapi dst=include HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1
# ---------------------------------------------------------------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index cae0958a2298..21d0089117fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -198,9 +198,6 @@ config HAVE_KPROBES_ON_FTRACE
config HAVE_NMI
bool
-config HAVE_NMI_WATCHDOG
- depends on HAVE_NMI
- bool
#
# An arch should select this if it provides all these things:
#
@@ -226,6 +223,12 @@ config GENERIC_SMP_IDLE_THREAD
config GENERIC_IDLE_POLL_SETUP
bool
+config ARCH_HAS_FORTIFY_SOURCE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run with CONFIG_FORTIFY_SOURCE.
+
# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
config ARCH_HAS_SET_MEMORY
bool
@@ -288,6 +291,28 @@ config HAVE_PERF_EVENTS_NMI
subsystem. Also has support for calculating CPU cycle events
to determine how many clock cycles in a given period.
+config HAVE_HARDLOCKUP_DETECTOR_PERF
+ bool
+ depends on HAVE_PERF_EVENTS_NMI
+ help
+ The arch chooses to use the generic perf-NMI-based hardlockup
+ detector. Must define HAVE_PERF_EVENTS_NMI.
+
+config HAVE_NMI_WATCHDOG
+ depends on HAVE_NMI
+ bool
+ help
+ The arch provides a low level NMI watchdog. It provides
+ asm/nmi.h, and defines its own arch_touch_nmi_watchdog().
+
+config HAVE_HARDLOCKUP_DETECTOR_ARCH
+ bool
+ select HAVE_NMI_WATCHDOG
+ help
+ The arch chooses to provide its own hardlockup detector, which is
+	  a superset of HAVE_NMI_WATCHDOG. It also conforms to the config
+	  interfaces and parameters provided by the hardlockup detector
+	  subsystem.
+
config HAVE_PERF_REGS
bool
help
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 3e74ca5e6402..353dae386b2f 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,51 +1,27 @@
-generic-y += auxvec.h
-generic-y += bitsperlong.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += msgbuf.h
generic-y += msi.h
-generic-y += param.h
generic-y += parport.h
generic-y += pci.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index b55fc2ae1e8c..fa6d0ff4ff89 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,4 +1,28 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d8360501c082..721ab5ecfb9b 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,39 +1,23 @@
-
-
-generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += current.h
generic-y += early_ioremap.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
-generic-y += msgbuf.h
generic-y += msi.h
-generic-y += param.h
generic-y += parport.h
-generic-y += poll.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += rwsem.h
generic-y += seccomp.h
generic-y += segment.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
generic-y += simd.h
generic-y += sizes.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d69bebf697e7..74504b154256 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
/*
* Select the calling method
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index acf1d14b89a6..29d3a1524bce 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,12 +5,31 @@
#ifndef __ARM_FLAT_H__
#define __ARM_FLAT_H__
+#include <linux/uaccess.h>
+
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
- ({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
-#define flat_put_addr_at_rp(rp, val, relval) __put_user_unaligned(val, rp)
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+ return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+ return put_user(addr, rp);
+#endif
+}
+
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
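The hunk above replaces the flat-loader's value-returning relocation macros with int-returning inline helpers, so a faulted user access propagates -EFAULT instead of silently yielding garbage, and it falls back to copy_from_user()/copy_to_user() when the CPU lacks efficient unaligned loads. A minimal userspace sketch of that alignment switch (the HAVE_EFFICIENT_UNALIGNED_ACCESS macro spelling and the get_addr() name here are illustrative, not part of the patch):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	/* Sketch only: fetch a 32-bit word from a possibly misaligned
	 * pointer. Without efficient unaligned loads, copy bytewise
	 * (alignment-safe); otherwise a direct load is fine. Mirrors the
	 * #ifdef split in the hunk above. */
	static int get_addr(const unsigned char *rp, uint32_t *addr)
	{
	#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
		*addr = *(const uint32_t *)rp;	/* may trap on strict-alignment CPUs */
	#else
		memcpy(addr, rp, 4);		/* byte copy never traps */
	#endif
		return 0;	/* the kernel version returns -EFAULT on fault */
	}

	int main(void)
	{
		unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };
		uint32_t v;

		get_addr(buf + 1, &v);		/* deliberately misaligned */
		printf("%08x\n", v);		/* 12345678 on little-endian */
		return 0;
	}

The same pattern recurs in the m68k flat.h hunk further down, keyed on CONFIG_CPU_HAS_NO_UNALIGNED instead.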
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 58508900c4bb..14b5903f0224 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -110,8 +110,8 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
-void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
+asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
+asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
static inline bool __vfp_enabled(void)
{
return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
@@ -120,8 +120,8 @@ static inline bool __vfp_enabled(void)
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
-int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
+asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *host);
-int asmlinkage __hyp_do_panic(const char *, int, u32);
+asmlinkage int __hyp_do_panic(const char *, int, u32);
#endif /* __ARM_KVM_HYP_H__ */
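These kvm_hyp.h changes, like the frv tlbflush.h, cris gpio.c, ia64, and m68k hunks later in the series, only move declaration specifiers such as asmlinkage and inline in front of the return type; gcc's -Wold-style-declaration (enabled by -Wextra) warns when they trail it. A minimal reproduction using inline, since asmlinkage is a kernel macro:

	/* Compile with: gcc -Wextra -c specifiers.c
	 * The first function draws "'inline' is not at beginning of
	 * declaration" (-Wold-style-declaration); the second shows the
	 * fixed ordering used throughout this series. */
	int inline old_style(int x)
	{
		return x + 1;
	}

	static inline int new_style(int x)
	{
		return x + 1;
	}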
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 6838abc04279..0bf2347495f1 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -17,13 +17,6 @@
#include <asm/unified.h>
#include <asm/compiler.h>
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm-generic/uaccess-unaligned.h>
-#else
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-#endif
-
#include <asm/extable.h>
/*
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 5fb3368e70cb..8e17fe80b55b 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -5,4 +5,18 @@ generated-y += unistd-common.h
generated-y += unistd-oabi.h
generated-y += unistd-eabi.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += termbits.h
+generic-y += termios.h
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 56dc1a3a33b4..c1809fb549dd 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -480,7 +480,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
ret = pcibios_init_resource(nr, sys, hw->io_optional);
if (ret) {
- kfree(sys);
+ pci_free_host_bridge(bridge);
break;
}
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index b143c4659346..7fc11a3c17b4 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -33,7 +33,7 @@ static unsigned long jornada_ssp_flags;
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
*/
-u8 inline jornada_ssp_reverse(u8 byte)
+inline u8 jornada_ssp_reverse(u8 byte)
{
return
((0x80 & byte) >> 7) |
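For reference, the comment above this hunk documents an MSB-to-LSB bit reversal (01110111 -> 11101110). A standalone sketch of that transform, confirming the comment's own example (the function name is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Reverse the bit order of one byte, as the jornada720 comment
	 * describes: 0x77 (01110111) becomes 0xee (11101110). */
	static uint8_t reverse8(uint8_t b)
	{
		uint8_t r = 0;
		int i;

		for (i = 0; i < 8; i++)
			r |= ((b >> i) & 1) << (7 - i);
		return r;
	}

	int main(void)
	{
		printf("%02x\n", reverse8(0x77));	/* prints ee */
		return 0;
	}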
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8addb851ab5e..dfd908630631 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -12,6 +12,7 @@ config ARM64
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index a7a97a608033..f81c7b685fc6 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -6,41 +6,24 @@ generic-y += dma.h
generic-y += dma-contiguous.h
generic-y += early_ioremap.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += msgbuf.h
generic-y += msi.h
-generic-y += poll.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += rwsem.h
generic-y += segment.h
-generic-y += sembuf.h
generic-y += serial.h
generic-y += set_memory.h
-generic-y += shmbuf.h
generic-y += simd.h
generic-y += sizes.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += swab.h
generic-y += switch_to.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index fe5e287dc56b..b86a0865ddf1 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -30,6 +30,7 @@ static __always_inline void boot_init_stack_canary(void)
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
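The added canary &= CANARY_MASK line clears the low byte of the freshly randomised canary on 64-bit, embedding a NUL so that string-based overflows cannot copy a complete canary back into place. A sketch with an assumed mask of 0xffffffffffffff00UL, mirroring the generic definition rather than anything shown in this hunk:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed value, mirroring the generic 64-bit CANARY_MASK: keep
	 * 56 random bits and force the low byte to zero so the canary
	 * embeds a NUL terminator. */
	#define CANARY_MASK 0xffffffffffffff00UL

	int main(void)
	{
		uint64_t canary = 0x1122334455667788UL; /* stand-in for random bytes */

		canary &= CANARY_MASK;
		printf("%016lx\n", (unsigned long)canary); /* ...7700: low byte cleared */
		return 0;
	}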
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 2eb714c4639f..d0aa42907569 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -63,6 +63,11 @@ extern int memcmp(const void *, const void *, size_t);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
#endif
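This hunk opts the arm64 KASAN build out of FORTIFY_SOURCE because the header aliases memcpy() and friends to raw __mem* routines, hiding the calls from fortify's size checking. Roughly, that checking rests on the compiler knowing object sizes; a hedged userspace illustration of the primitive involved (not kernel code):

	#include <stdio.h>
	#include <string.h>

	/* Illustration only: __builtin_object_size() is the primitive
	 * that FORTIFY_SOURCE-style wrappers consult. With optimisation
	 * enabled the compiler knows dst is 8 bytes and can flag
	 * oversized copies; aliasing memcpy to a raw routine bypasses
	 * the wrapper entirely, hence the __NO_FORTIFY opt-out above. */
	int main(void)
	{
		char dst[8];

		printf("known size: %zu\n", __builtin_object_size(dst, 0));
		memcpy(dst, "1234567", 8);	/* within bounds */
		return 0;
	}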
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 59f09e6a6cb8..8f0a1de11e4a 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -254,8 +254,6 @@ do { \
(void)0; \
})
-#define __get_user_unaligned __get_user
-
#define get_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
@@ -320,8 +318,6 @@ do { \
(void)0; \
})
-#define __put_user_unaligned __put_user
-
#define put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 13a97aa2285f..fc28bd95c6d3 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -1,4 +1,20 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index adc208c2ae9c..decccffb03ca 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -35,7 +35,7 @@
* Leave enough space between the mmap area and the stack to honour ulimit in
* the face of randomisation.
*/
-#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
+#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP/6*5)
static int mmap_is_legacy(void)
@@ -65,6 +65,11 @@ unsigned long arch_mmap_rnd(void)
static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
+ unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+ gap += pad;
if (gap < MIN_GAP)
gap = MIN_GAP;
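The replacement code folds the stack randomisation padding into the gap only when the unsigned addition cannot wrap, so an RLIMIT_STACK near RLIM_INFINITY no longer overflows the arithmetic that the old MIN_GAP definition performed unconditionally. The guard in isolation, as a checkable sketch:

	#include <limits.h>
	#include <stdio.h>

	/* The overflow guard from mmap_base() in isolation: add pad only
	 * when gap + pad does not wrap. For unsigned arithmetic,
	 * gap + pad > gap holds exactly when the sum did not overflow
	 * (and pad is non-zero). */
	static unsigned long pad_gap(unsigned long gap, unsigned long pad)
	{
		if (gap + pad > gap)
			gap += pad;
		return gap;
	}

	int main(void)
	{
		printf("%lu\n", pad_gap(1024, 64));		/* 1088 */
		printf("%lu\n", pad_gap(ULONG_MAX - 1, 64));	/* unchanged: would wrap */
		return 0;
	}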
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index dc4ef9ac1e83..fe736973630f 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,50 +1,28 @@
-
-generic-y += auxvec.h
-generic-y += bitsperlong.h
generic-y += bugs.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += extable.h
generic-y += fb.h
generic-y += futex.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += percpu.h
generic-y += pgalloc.h
generic-y += preempt.h
-generic-y += resource.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += setup.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/blackfin/include/asm/flat.h b/arch/blackfin/include/asm/flat.h
index c1314c56dd18..f1d6ba7afbf2 100644
--- a/arch/blackfin/include/asm/flat.h
+++ b/arch/blackfin/include/asm/flat.h
@@ -14,23 +14,28 @@
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
-extern unsigned long bfin_get_addr_from_rp (unsigned long *ptr,
- unsigned long relval,
- unsigned long flags,
- unsigned long *persistent);
+extern unsigned long bfin_get_addr_from_rp (u32 *ptr, u32 relval,
+ u32 flags, u32 *persistent);
-extern void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
- unsigned long relval);
+extern void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval);
/* The amount by which a relocation can exceed the program image limits
without being regarded as an error. */
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
- bfin_get_addr_from_rp(rp, relval, flags, persistent)
-#define flat_put_addr_at_rp(rp, val, relval) \
- bfin_put_addr_at_rp(rp, val, relval)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ *addr = bfin_get_addr_from_rp(rp, relval, flags, persistent);
+ return 0;
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 val, u32 relval)
+{
+ bfin_put_addr_at_rp(rp, val, relval);
+ return 0;
+}
/* Convert a relocation entry into an address. */
static inline unsigned long
@@ -39,8 +44,7 @@ flat_get_relocate_addr (unsigned long relval)
return relval & 0x03ffffff; /* Mask out top 6 bits */
}
-static inline int flat_set_persistent(unsigned long relval,
- unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
{
int type = (relval >> 26) & 7;
if (type == 3) {
diff --git a/arch/blackfin/include/asm/nmi.h b/arch/blackfin/include/asm/nmi.h
index b9caac4fcfd8..107d23705f46 100644
--- a/arch/blackfin/include/asm/nmi.h
+++ b/arch/blackfin/include/asm/nmi.h
@@ -9,4 +9,6 @@
#include <linux/nmi.h>
+extern void arch_touch_nmi_watchdog(void);
+
#endif
diff --git a/arch/blackfin/include/uapi/asm/Kbuild b/arch/blackfin/include/uapi/asm/Kbuild
index b15bf6bc0e94..aa624b4ab655 100644
--- a/arch/blackfin/include/uapi/asm/Kbuild
+++ b/arch/blackfin/include/uapi/asm/Kbuild
@@ -1,2 +1,24 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c
index b5b658449616..8ebc54daaa8e 100644
--- a/arch/blackfin/kernel/flat.c
+++ b/arch/blackfin/kernel/flat.c
@@ -13,14 +13,14 @@
#define FLAT_BFIN_RELOC_TYPE_16H_BIT 1
#define FLAT_BFIN_RELOC_TYPE_32_BIT 2
-unsigned long bfin_get_addr_from_rp(unsigned long *ptr,
- unsigned long relval,
- unsigned long flags,
- unsigned long *persistent)
+unsigned long bfin_get_addr_from_rp(u32 *ptr,
+ u32 relval,
+ u32 flags,
+ u32 *persistent)
{
unsigned short *usptr = (unsigned short *)ptr;
int type = (relval >> 26) & 7;
- unsigned long val;
+ u32 val;
switch (type) {
case FLAT_BFIN_RELOC_TYPE_16_BIT:
@@ -32,7 +32,7 @@ unsigned long bfin_get_addr_from_rp(unsigned long *ptr,
break;
case FLAT_BFIN_RELOC_TYPE_32_BIT:
- pr_debug("*ptr = %lx", get_unaligned(ptr));
+ pr_debug("*ptr = %x", get_unaligned(ptr));
val = get_unaligned(ptr);
break;
@@ -59,8 +59,7 @@ EXPORT_SYMBOL(bfin_get_addr_from_rp);
* Insert the address ADDR into the symbol reference at RP;
* RELVAL is the raw relocation-table entry from which RP is derived
*/
-void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
- unsigned long relval)
+void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval)
{
unsigned short *usptr = (unsigned short *)ptr;
int type = (relval >> 26) & 7;
@@ -78,7 +77,7 @@ void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
case FLAT_BFIN_RELOC_TYPE_32_BIT:
put_unaligned(addr, ptr);
- pr_debug("new ptr =%lx", get_unaligned(ptr));
+ pr_debug("new ptr =%x", get_unaligned(ptr));
break;
}
}
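The helpers above dispatch on the relocation type held in the top bits of relval, while flat_get_relocate_addr() in the matching header keeps the low 26 bits as the address. The encoding in isolation, with invented values:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the bfin FLAT relocation word layout used above: the
	 * type lives in the high bits ((relval >> 26) & 7), the target
	 * address in the low 26 bits (masked by flat_get_relocate_addr()).
	 * Values are made up for illustration. */
	int main(void)
	{
		uint32_t relval = (2u << 26) | 0x012345; /* type 2: 32-bit reloc */

		printf("type=%u addr=%07x\n",
		       (relval >> 26) & 7, relval & 0x03ffffff);
		return 0;
	}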
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 633c37083e87..1e714329fe8a 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -190,7 +190,7 @@ static int __init init_nmi_wdt(void)
}
device_initcall(init_nmi_wdt);
-void touch_nmi_watchdog(void)
+void arch_touch_nmi_watchdog(void)
{
atomic_set(&nmi_touched[smp_processor_id()], 1);
}
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index a3c8d05c4cc7..d717329c8cf9 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -1,8 +1,5 @@
-
generic-y += atomic.h
-generic-y += auxvec.h
generic-y += barrier.h
-generic-y += bitsperlong.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
@@ -10,55 +7,32 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += futex.h
generic-y += hw_irq.h
generic-y += io.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += mmu.h
generic-y += mmu_context.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
generic-y += pgalloc.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += segment.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += tlbflush.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/c6x/include/asm/flat.h b/arch/c6x/include/asm/flat.h
index a1858bd5f6c8..6f1feb00bd52 100644
--- a/arch/c6x/include/asm/flat.h
+++ b/arch/c6x/include/asm/flat.h
@@ -1,11 +1,22 @@
#ifndef __ASM_C6X_FLAT_H
#define __ASM_C6X_FLAT_H
+#include <asm/unaligned.h>
+
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val, rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 1c44d3b3eba0..67ee896a76a7 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,5 +1,30 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c
index 64285e0d3481..dfd3b3ba5e4e 100644
--- a/arch/cris/arch-v10/drivers/gpio.c
+++ b/arch/cris/arch-v10/drivers/gpio.c
@@ -399,7 +399,7 @@ out:
/* Main device API. ioctl's to read/set/clear bits, as well as to
* set alarms to wait for using a subsequent select().
*/
-unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg)
+inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
{
/* Set direction 0=unchanged 1=input,
* return mask with 1=input */
@@ -450,7 +450,7 @@ unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg)
return dir_g_in_bits;
} /* setget_input */
-unsigned long inline setget_output(struct gpio_private *priv, unsigned long arg)
+inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg)
{
if (USE_PORTS(priv)) {
*priv->dir = *priv->dir_shadow |=
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index acc5781100c2..460349cb147f 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,48 +1,31 @@
generic-y += atomic.h
-generic-y += auxvec.h
generic-y += barrier.h
-generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
-generic-y += errno.h
+generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
-generic-y += emergency-restart.h
-generic-y += fcntl.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += module.h
-generic-y += msgbuf.h
generic-y += percpu.h
-generic-y += poll.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += sections.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += statfs.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index b55fc2ae1e8c..3687b54bb18e 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,4 +1,21 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += types.h
diff --git a/arch/frv/include/asm/tlbflush.h b/arch/frv/include/asm/tlbflush.h
index 7ac5eafc5d98..75879420f578 100644
--- a/arch/frv/include/asm/tlbflush.h
+++ b/arch/frv/include/asm/tlbflush.h
@@ -18,10 +18,10 @@
#ifdef CONFIG_MMU
#ifndef __ASSEMBLY__
-extern void asmlinkage __flush_tlb_all(void);
-extern void asmlinkage __flush_tlb_mm(unsigned long contextid);
-extern void asmlinkage __flush_tlb_page(unsigned long contextid, unsigned long start);
-extern void asmlinkage __flush_tlb_range(unsigned long contextid,
+extern asmlinkage void __flush_tlb_all(void);
+extern asmlinkage void __flush_tlb_mm(unsigned long contextid);
+extern asmlinkage void __flush_tlb_page(unsigned long contextid, unsigned long start);
+extern asmlinkage void __flush_tlb_range(unsigned long contextid,
unsigned long start, unsigned long end);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 99c824608a31..bc077491d299 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,5 +1,4 @@
generic-y += asm-offsets.h
-generic-y += auxvec.h
generic-y += barrier.h
generic-y += bugs.h
generic-y += cacheflush.h
@@ -11,66 +10,41 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += hash.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += mmu.h
generic-y += mmu_context.h
generic-y += module.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
generic-y += pgalloc.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += setup.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
generic-y += sizes.h
-generic-y += socket.h
-generic-y += sockios.h
generic-y += spinlock.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += timex.h
generic-y += tlbflush.h
-generic-y += trace_clock.h
generic-y += topology.h
-generic-y += types.h
-generic-y += ucontext.h
+generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
index a4898eccf2bf..7e0bd6fa1532 100644
--- a/arch/h8300/include/asm/flat.h
+++ b/arch/h8300/include/asm/flat.h
@@ -5,6 +5,8 @@
#ifndef __H8300_FLAT_H__
#define __H8300_FLAT_H__
+#include <asm/unaligned.h>
+
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) 1
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
@@ -18,11 +20,21 @@
*/
#define flat_get_relocate_addr(rel) (rel & ~0x00000001)
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
- ({(void)persistent; \
- get_unaligned(rp) & (((flags) & FLAT_FLAG_GOTPIC) ? \
- 0xffffffff : 0x00ffffff); })
-#define flat_put_addr_at_rp(rp, addr, rel) \
- put_unaligned(((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), (rp))
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ u32 val = get_unaligned((__force u32 *)rp);
+ if (!(flags & FLAT_FLAG_GOTPIC))
+ val &= 0x00ffffff;
+ *addr = val;
+ return 0;
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ u32 *p = (__force u32 *)rp;
+ put_unaligned((addr & 0x00ffffff) | (*(char *)p << 24), p);
+ return 0;
+}
#endif /* __H8300_FLAT_H__ */
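In the non-GOTPIC case above only the low 24 bits of the stored word carry an address, and a put preserves the word's leading byte. The packing step in isolation (values invented, and the kept byte handled as a plain mask rather than the header's byte-pointer access):

	#include <stdint.h>
	#include <stdio.h>

	/* The h8300 24-bit packing from flat_put_addr_at_rp() in
	 * isolation: write a new address into the low 24 bits while
	 * keeping the word's existing top byte. Illustrative only. */
	static uint32_t put24(uint32_t word, uint32_t addr)
	{
		return (addr & 0x00ffffff) | (word & 0xff000000);
	}

	int main(void)
	{
		printf("%08x\n", put24(0x5a111111, 0x00abcdef)); /* 5aabcdef */
		return 0;
	}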
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index b55fc2ae1e8c..187aed820e71 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,4 +1,30 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 0fc9cb04e6ad..34013683d123 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,4 +1,3 @@
-generic-y += auxvec.h
generic-y += barrier.h
generic-y += bug.h
generic-y += bugs.h
@@ -7,53 +6,32 @@ generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
generic-y += iomap.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += msgbuf.h
generic-y += pci.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += rwsem.h
generic-y += sections.h
generic-y += segment.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
generic-y += sizes.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += unaligned.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index b55fc2ae1e8c..cb5df3aad3a8 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,4 +1,26 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 502a91d8dbbd..1d7641f891e1 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,8 +1,6 @@
-
generic-y += clkdev.h
generic-y += exec.h
generic-y += irq_work.h
-generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index b2106b01e84f..0890ded638f0 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -87,42 +87,6 @@ static inline int __access_ok(const void __user *p, unsigned long size)
#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-extern long __put_user_unaligned_unknown (void);
-
-#define __put_user_unaligned(x, ptr) \
-({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __put_user((x), (ptr)); break; \
- case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
- | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
- | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
- | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __put_user_unaligned_unknown(); \
- } \
- __ret; \
-})
-
-extern long __get_user_unaligned_unknown (void);
-
-#define __get_user_unaligned(x, ptr) \
-({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __get_user((x), (ptr)); break; \
- case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
- | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
- | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
- | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __get_user_unaligned_unknown(); \
- } \
- __ret; \
-})
-
#ifdef ASM_SUPPORTED
struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))
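The macros deleted here emulated unaligned user accesses by splitting the value into naturally aligned halves, e.g. two 32-bit stores for an 8-byte value; with the last __{get,put}_user_unaligned users gone, the emulation is dead code. The case-8 split in isolation, shown as plain stores rather than __put_user():

	#include <stdint.h>
	#include <stdio.h>

	/* The split performed by the deleted case-8 branch, in isolation:
	 * store a 64-bit value as two 32-bit halves at consecutive u32
	 * slots (little-endian layout, as on ia64). */
	int main(void)
	{
		uint32_t slot[2];
		uint64_t x = 0x1122334455667788ULL;

		slot[0] = (uint32_t)x;		/* low half */
		slot[1] = (uint32_t)(x >> 32);	/* high half */
		printf("%08x %08x\n", slot[1], slot[0]); /* 11223344 55667788 */
		return 0;
	}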
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 599507bcec91..c14815dca747 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -163,8 +163,3 @@ void arch_crash_save_vmcoreinfo(void)
#endif
}
-phys_addr_t paddr_vmcoreinfo_note(void)
-{
- return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
-}
-
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 79c7c46d7dc1..555b11180156 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -334,7 +334,7 @@ static void ia64_mlogbuf_dump_from_init(void)
ia64_mlogbuf_dump();
}
-static void inline
+static inline void
ia64_mca_spin(const char *func)
{
if (monarch_cpu == smp_processor_id())
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 5bc34eac9e01..b67bb4cb73ff 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -140,7 +140,7 @@ static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
/*
* Update the ate.
*/
-void inline
+inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile u64 ate)
{
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 46d3df4b03a1..3bd9abc35485 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -52,7 +52,7 @@
* All registers defined in struct tioce will meet that criteria.
*/
-static void inline
+static inline void
tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
u64 mmr_base;
@@ -78,7 +78,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
}
}
-static void inline
+static inline void
tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
u64 mmr_base;
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index c000ffac8586..7e11b125c35e 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,10 +1,9 @@
-
generic-y += clkdev.h
generic-y += current.h
generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
@@ -12,4 +11,3 @@ generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
-generic-y += kprobes.h
diff --git a/arch/m32r/include/asm/flat.h b/arch/m32r/include/asm/flat.h
index 5d711c4688fb..455ce7ddbf14 100644
--- a/arch/m32r/include/asm/flat.h
+++ b/arch/m32r/include/asm/flat.h
@@ -17,11 +17,6 @@
#define flat_set_persistent(relval, p) 0
#define flat_reloc_valid(reloc, size) \
(((reloc) - textlen_for_m32r_lo16_data) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
- m32r_flat_get_addr_from_rp(rp, relval, (text_len) )
-
-#define flat_put_addr_at_rp(rp, addr, relval) \
- m32r_flat_put_addr_at_rp(rp, addr, relval)
/* Convert a relocation entry into an address. */
static inline unsigned long
@@ -57,9 +52,9 @@ flat_get_relocate_addr (unsigned long relval)
static unsigned long textlen_for_m32r_lo16_data = 0;
-static inline unsigned long m32r_flat_get_addr_from_rp (unsigned long *rp,
- unsigned long relval,
- unsigned long textlen)
+static inline unsigned long m32r_flat_get_addr_from_rp (u32 *rp,
+ u32 relval,
+ u32 textlen)
{
unsigned int reloc = flat_m32r_get_reloc_type (relval);
textlen_for_m32r_lo16_data = 0;
@@ -100,9 +95,7 @@ static inline unsigned long m32r_flat_get_addr_from_rp (unsigned long *rp,
return ~0; /* bogus value */
}
-static inline void m32r_flat_put_addr_at_rp (unsigned long *rp,
- unsigned long addr,
- unsigned long relval)
+static inline void flat_put_addr_at_rp(u32 *rp, u32 addr, u32 relval)
{
unsigned int reloc = flat_m32r_get_reloc_type (relval);
if (reloc & 0xf0) {
@@ -142,4 +135,8 @@ static inline void m32r_flat_put_addr_at_rp (unsigned long *rp,
}
}
+// kludge - text_len is a local variable in the only user.
+#define flat_get_addr_from_rp(rp, relval, flags, addr, persistent) \
+ (m32r_flat_get_addr_from_rp(rp, relval, text_len), 0)
+
#endif /* __ASM_M32R_FLAT_H */
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index c94ee54210bc..1c44d3b3eba0 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += siginfo.h
+generic-y += kvm_para.h
+generic-y += siginfo.h
diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c
index 7cf2c156f72d..15c4b7a6e38f 100644
--- a/arch/m68k/coldfire/intc-simr.c
+++ b/arch/m68k/coldfire/intc-simr.c
@@ -35,7 +35,7 @@
#define EINT7 67 /* EDGE Port interrupt 7 */
static unsigned int irqebitmap[] = { 0, 1, 4, 7 };
-static unsigned int inline irq2ebit(unsigned int irq)
+static inline unsigned int irq2ebit(unsigned int irq)
{
return irqebitmap[irq - EINT0];
}
@@ -51,7 +51,7 @@ static unsigned int inline irq2ebit(unsigned int irq)
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT7 71 /* EDGE Port interrupt 7 */
-static unsigned int inline irq2ebit(unsigned int irq)
+static inline unsigned int irq2ebit(unsigned int irq)
{
return irq - EINT0;
}
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 5ecf4e47b2e2..59d6d0d38f67 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,36 +1,25 @@
generic-y += barrier.h
-generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += device.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += futex.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += sections.h
-generic-y += shmparam.h
generic-y += spinlock.h
-generic-y += statfs.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index 00c392b0cabd..b2a41f5b3890 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -5,16 +5,32 @@
#ifndef __M68KNOMMU_FLAT_H__
#define __M68KNOMMU_FLAT_H__
+#include <linux/uaccess.h>
+
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p) \
- ({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
-#define flat_put_addr_at_rp(rp, val, relval) __put_user_unaligned(val, rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+ return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+ return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+ return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+ return put_user(addr, rp);
+#endif
+}
#define flat_get_relocate_addr(rel) (rel)
-static inline int flat_set_persistent(unsigned long relval,
- unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
{
return 0;
}
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index 67b3481d6020..63ba18e4c9a2 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -3,11 +3,4 @@
#else
#include <asm/uaccess_mm.h>
#endif
-
#include <asm/extable.h>
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
-#include <asm-generic/uaccess-unaligned.h>
-#else
-#define __get_user_unaligned(x, ptr) __get_user((x), (ptr))
-#define __put_user_unaligned(x, ptr) __put_user((x), (ptr))
-#endif
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 68b45cc87e2c..3717b64a620d 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -2,11 +2,21 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
generic-y += msgbuf.h
+generic-y += resource.h
generic-y += sembuf.h
generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
generic-y += socket.h
generic-y += sockios.h
+generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
+generic-y += types.h
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 8f940553a579..3fba97ed9bb2 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -1,58 +1,34 @@
-generic-y += auxvec.h
-generic-y += bitsperlong.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += dma.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
generic-y += switch_to.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index b29731ebd7a9..6ac763d9a3e3 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -1,6 +1,30 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
generic-y += resource.h
+generic-y += sembuf.h
generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 83a4ef3a2495..9d66f7793841 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,22 +1,15 @@
-
generic-y += barrier.h
generic-y += bitops.h
-generic-y += bitsperlong.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += hardirq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@@ -27,31 +20,13 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
-generic-y += poll.h
generic-y += preempt.h
-generic-y += resource.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
generic-y += syscalls.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += ucontext.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h
index 6847c1512c7b..f23c3d266bae 100644
--- a/arch/microblaze/include/asm/flat.h
+++ b/arch/microblaze/include/asm/flat.h
@@ -32,29 +32,27 @@
* reference
*/
-static inline unsigned long
-flat_get_addr_from_rp(unsigned long *rp, unsigned long relval,
- unsigned long flags, unsigned long *persistent)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
{
- unsigned long addr;
- (void)flags;
+ u32 *p = (__force u32 *)rp;
/* Is it a split 64/32 reference? */
if (relval & 0x80000000) {
/* Grab the two halves of the reference */
- unsigned long val_hi, val_lo;
+ u32 val_hi, val_lo;
- val_hi = get_unaligned(rp);
- val_lo = get_unaligned(rp+1);
+ val_hi = get_unaligned(p);
+ val_lo = get_unaligned(p+1);
/* Crack the address out */
- addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff);
+ *addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff);
} else {
/* Get the address straight out */
- addr = get_unaligned(rp);
+ *addr = get_unaligned(p);
}
- return addr;
+ return 0;
}
/*
@@ -63,25 +61,27 @@ flat_get_addr_from_rp(unsigned long *rp, unsigned long relval,
*/
-static inline void
-flat_put_addr_at_rp(unsigned long *rp, unsigned long addr, unsigned long relval)
+static inline int
+flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 relval)
{
+ u32 *p = (__force u32 *)rp;
/* Is this a split 64/32 reloc? */
if (relval & 0x80000000) {
/* Get the two "halves" */
- unsigned long val_hi = get_unaligned(rp);
- unsigned long val_lo = get_unaligned(rp + 1);
+ unsigned long val_hi = get_unaligned(p);
+ unsigned long val_lo = get_unaligned(p + 1);
/* insert the address */
val_hi = (val_hi & 0xffff0000) | addr >> 16;
val_lo = (val_lo & 0xffff0000) | (addr & 0xffff);
/* store the two halves back into memory */
- put_unaligned(val_hi, rp);
- put_unaligned(val_lo, rp+1);
+ put_unaligned(val_hi, p);
+ put_unaligned(val_lo, p+1);
} else {
/* Put it straight in, no messing around */
- put_unaligned(addr, rp);
+ put_unaligned(addr, p);
}
+ return 0;
}
#define flat_get_relocate_addr(rel) (rel & 0x7fffffff)
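In the split 64/32 case above, bit 31 of relval marks the split form (hence the 0x7fffffff mask just above) and the address halves live in the low 16 bits of two consecutive words. The recombination on its own, with invented word values:

	#include <stdint.h>
	#include <stdio.h>

	/* The microblaze split-reference recombination in isolation: the
	 * high and low 16 bits of the target address sit in the low
	 * halves of two consecutive 32-bit words. Word values here are
	 * invented for illustration. */
	int main(void)
	{
		uint32_t val_hi = 0xb0001234;	/* low half holds addr[31:16] */
		uint32_t val_lo = 0x30a05678;	/* low half holds addr[15:0] */
		uint32_t addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff);

		printf("%08x\n", addr);		/* 12345678 */
		return 0;
	}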
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index cb6784f4a427..e77a596f3f1e 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -1,5 +1,28 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += types.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 45bcd1cfcec0..8dd20358464f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,75 +1,77 @@
config MIPS
bool
default y
- select ARCH_SUPPORTS_UPROBES
+ select ARCH_BINFMT_ELF_STATE
+ select ARCH_CLOCKSOURCE_DATA
+ select ARCH_DISCARD_MEMBLOCK
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
- select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
+ select ARCH_SUPPORTS_UPROBES
select ARCH_USE_BUILTIN_BSWAP
- select HAVE_CONTEXT_TRACKING
- select HAVE_GENERIC_DMA_COHERENT
- select HAVE_IDE
- select HAVE_IRQ_EXIT_ON_IRQ_STACK
- select HAVE_OPROFILE
- select HAVE_PERF_EVENTS
- select PERF_USE_VMALLOC
+ select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS
+ select CPU_PM if CPU_IDLE
+ select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
+ select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_TIME_VSYSCALL
+ select HANDLE_DOMAIN_IRQ
+ select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_CBPF_JIT if !CPU_MICROMIPS
- select HAVE_FUNCTION_TRACER
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
+ select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS)
+ select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS)
+ select HAVE_CC_STACKPROTECTOR
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_COPY_THREAD_TLS
+ select HAVE_C_RECORDMCOUNT
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_C_RECORDMCOUNT
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_IDE
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_DEBUG_KMEMLEAK
- select HAVE_SYSCALL_TRACEPOINTS
- select ARCH_HAS_ELF_RANDOMIZE
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
- select RTC_LIB if !MACH_LOONGSON64
- select GENERIC_ATOMIC64 if !64BIT
- select HAVE_DMA_CONTIGUOUS
- select HAVE_DMA_API_DEBUG
- select GENERIC_IRQ_PROBE
- select GENERIC_IRQ_SHOW
- select GENERIC_PCI_IOMAP
- select HAVE_ARCH_JUMP_LABEL
- select ARCH_WANT_IPC_PARSE_VERSION
- select IRQ_FORCED_THREADING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select ARCH_DISCARD_MEMBLOCK
- select GENERIC_SMP_IDLE_THREAD
- select BUILDTIME_EXTABLE_SORT
- select GENERIC_CPU_AUTOPROBE
- select GENERIC_CLOCKEVENTS
- select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
- select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select VIRT_TO_BUS
- select MODULES_USE_ELF_REL if MODULES
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
+ select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA if MODULES && 64BIT
- select CLONE_BACKWARDS
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_CC_STACKPROTECTOR
- select CPU_PM if CPU_IDLE
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_BINFMT_ELF_STATE
+ select MODULES_USE_ELF_REL if MODULES
+ select PERF_USE_VMALLOC
+ select RTC_LIB if !MACH_LOONGSON64
select SYSCTL_EXCEPTION_TRACE
- select HAVE_VIRT_CPU_ACCOUNTING_GEN
- select HAVE_IRQ_TIME_ACCOUNTING
- select GENERIC_TIME_VSYSCALL
- select ARCH_CLOCKSOURCE_DATA
- select HANDLE_DOMAIN_IRQ
- select HAVE_EXIT_THREAD
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_COPY_THREAD_TLS
+ select VIRT_TO_BUS
menu "Machine selection"
@@ -1179,6 +1181,15 @@ config SYS_SUPPORTS_RELOCATABLE
The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
to allow access to command line and entropy sources.
+config MIPS_CBPF_JIT
+ def_bool y
+ depends on BPF_JIT && HAVE_CBPF_JIT
+
+config MIPS_EBPF_JIT
+ def_bool y
+ depends on BPF_JIT && HAVE_EBPF_JIT
+
+
#
# Endianness selection. Sufficiently obscure so many users don't know what to
# answer,so we try hard to limit the available choices. Also the use of a
@@ -2062,7 +2073,7 @@ config CPU_SUPPORTS_UNCACHED_ACCELERATED
bool
config MIPS_PGD_C0_CONTEXT
bool
- default y if 64BIT && CPU_MIPSR2 && !CPU_XLP
+ default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
#
# Set to y for ptrace access to watch registers.
@@ -2370,6 +2381,7 @@ config MIPS_CPS
select SMP
select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
select SYS_SUPPORTS_HOTPLUG_CPU
+ select SYS_SUPPORTS_SCHED_SMT if CPU_MIPSR6
select SYS_SUPPORTS_SMP
select WEAK_ORDERING
help
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 02a1787c888c..04343625b929 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -160,7 +160,7 @@ cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS
-Wa,-mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32r2 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
diff --git a/arch/mips/boot/dts/img/Makefile b/arch/mips/boot/dts/img/Makefile
index 69a65f0f82d2..3d70958d0f5a 100644
--- a/arch/mips/boot/dts/img/Makefile
+++ b/arch/mips/boot/dts/img/Makefile
@@ -1,6 +1,7 @@
-dtb-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb
+dtb-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += boston.dtb
-obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+dtb-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb
+obj-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb.o
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
new file mode 100644
index 000000000000..53bfa29a7093
--- /dev/null
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -0,0 +1,224 @@
+/dts-v1/;
+
+#include <dt-bindings/clock/boston-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "img,boston";
+
+ chosen {
+ stdout-path = "uart0:115200";
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "img,mips";
+ reg = <0>;
+ clocks = <&clk_boston BOSTON_CLK_CPU>;
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ pci0: pci@10000000 {
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ device_type = "pci";
+ reg = <0x10000000 0x2000000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>;
+
+ ranges = <0x02000000 0 0x40000000
+ 0x40000000 0 0x40000000>;
+
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pci0_intc 1>,
+ <0 0 0 2 &pci0_intc 2>,
+ <0 0 0 3 &pci0_intc 3>,
+ <0 0 0 4 &pci0_intc 4>;
+
+ pci0_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pci1: pci@12000000 {
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ device_type = "pci";
+ reg = <0x12000000 0x2000000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 1 IRQ_TYPE_LEVEL_HIGH>;
+
+ ranges = <0x02000000 0 0x20000000
+ 0x20000000 0 0x20000000>;
+
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pci1_intc 1>,
+ <0 0 0 2 &pci1_intc 2>,
+ <0 0 0 3 &pci1_intc 3>,
+ <0 0 0 4 &pci1_intc 4>;
+
+ pci1_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pci2: pci@14000000 {
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ device_type = "pci";
+ reg = <0x14000000 0x2000000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 0 IRQ_TYPE_LEVEL_HIGH>;
+
+ ranges = <0x02000000 0 0x16000000
+ 0x16000000 0 0x100000>;
+
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pci2_intc 1>,
+ <0 0 0 2 &pci2_intc 2>,
+ <0 0 0 3 &pci2_intc 3>,
+ <0 0 0 4 &pci2_intc 4>;
+
+ pci2_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ pci2_root@0,0,0 {
+ compatible = "pci10ee,7021";
+ reg = <0x00000000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ eg20t_bridge@1,0,0 {
+ compatible = "pci8086,8800";
+ reg = <0x00010000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ eg20t_mac@2,0,1 {
+ compatible = "pci8086,8802";
+ reg = <0x00020100 0 0 0 0>;
+ phy-reset-gpios = <&eg20t_gpio 6
+ GPIO_ACTIVE_LOW>;
+ };
+
+ eg20t_gpio: eg20t_gpio@2,0,2 {
+ compatible = "pci8086,8803";
+ reg = <0x00020200 0 0 0 0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eg20t_i2c@2,12,2 {
+ compatible = "pci8086,8817";
+ reg = <0x00026200 0 0 0 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@0x68 {
+ compatible = "st,m41t81s";
+ reg = <0x68>;
+ };
+ };
+ };
+ };
+ };
+
+ gic: interrupt-controller@16120000 {
+ compatible = "mti,gic";
+ reg = <0x16120000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ clocks = <&clk_boston BOSTON_CLK_CPU>;
+ };
+ };
+
+ cdmm@16140000 {
+ compatible = "mti,mips-cdmm";
+ reg = <0x16140000 0x8000>;
+ };
+
+ cpc@16200000 {
+ compatible = "mti,mips-cpc";
+ reg = <0x16200000 0x8000>;
+ };
+
+ plat_regs: system-controller@17ffd000 {
+ compatible = "img,boston-platform-regs", "syscon";
+ reg = <0x17ffd000 0x1000>;
+
+ clk_boston: clock {
+ compatible = "img,boston-clock";
+ #clock-cells = <1>;
+ };
+ };
+
+ reboot: syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&plat_regs>;
+ offset = <0x10>;
+ mask = <0x10>;
+ };
+
+ uart0: uart@17ffe000 {
+ compatible = "ns16550a";
+ reg = <0x17ffe000 0x1000>;
+ reg-shift = <2>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&clk_boston BOSTON_CLK_SYS>;
+ };
+
+ lcd: lcd@17fff000 {
+ compatible = "img,boston-lcd";
+ reg = <0x17fff000 0x8>;
+ };
+};
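The reboot node above carries all the data the generic syscon-reboot driver needs: on restart it writes the mask value to the given offset within the regmap exported by plat_regs. A minimal sketch of that restart path, assuming the stock syscon-reboot semantics (the struct and function names here are illustrative, not the driver's actual source):

	/*
	 * Sketch: how a syscon-reboot style handler consumes the
	 * regmap/offset/mask properties of the node above.
	 * Hypothetical names; error handling trimmed.
	 */
	#include <linux/regmap.h>

	struct boston_reboot_ctx {
		struct regmap *map;	/* syscon regmap of plat_regs */
		u32 offset;		/* <0x10> */
		u32 mask;		/* <0x10> */
	};

	static int boston_do_restart(struct boston_reboot_ctx *ctx)
	{
		/* Writing the mask bit at the given offset resets the platform. */
		return regmap_write(ctx->map, ctx->offset, ctx->mask);
	}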
diff --git a/arch/mips/boot/dts/mti/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
index b112879a5d9d..4f8bc83c2960 100644
--- a/arch/mips/boot/dts/mti/sead3.dts
+++ b/arch/mips/boot/dts/mti/sead3.dts
@@ -11,15 +11,14 @@
 	#size-cells = <1>;
 	compatible = "mti,sead-3";
 	model = "MIPS SEAD-3";
-	interrupt-parent = <&gic>;
 
 	chosen {
-		stdout-path = "uart1:115200";
+		stdout-path = "serial1:115200";
 	};
 
 	aliases {
-		uart0 = &uart0;
-		uart1 = &uart1;
+		serial0 = &uart0;
+		serial1 = &uart1;
 	};
 
 	cpus {
@@ -54,18 +53,14 @@
* controller & should be probed first.
*/
interrupt-parent = <&cpu_intc>;
-
- timer {
- compatible = "mti,gic-timer";
- interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
- };
};
ehci@1b200000 {
compatible = "generic-ehci";
reg = <0x1b200000 0x1000>;
- interrupts = <0>; /* GIC 0 or CPU 6 */
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 0 IRQ_TYPE_LEVEL_HIGH>; /* GIC 0 or CPU 6 */
has-transaction-translator;
};
@@ -227,7 +222,8 @@
 		clock-frequency = <14745600>;
 
-		interrupts = <3>; /* GIC 3 or CPU 4 */
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>; /* GIC 3 or CPU 4 */
 
 		no-loopback-test;
 	};
 
@@ -241,7 +237,8 @@
 		clock-frequency = <14745600>;
 
-		interrupts = <2>; /* GIC 2 or CPU 4 */
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>; /* GIC 2 or CPU 4 */
 
 		no-loopback-test;
 	};
 
@@ -251,7 +248,8 @@
 		reg = <0x1f010000 0x10000>;
 		reg-io-width = <4>;
 
-		interrupts = <0>; /* GIC 0 or CPU 6 */
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SHARED 0 IRQ_TYPE_LEVEL_HIGH>; /* GIC 0 or CPU 6 */
 
 		phy-mode = "mii";
 		smsc,irq-push-pull;
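With interrupt-parent now given per device, each rewritten specifier is self-describing: <GIC_SHARED n IRQ_TYPE_LEVEL_HIGH> names shared GIC input n with a level-high trigger, matching the GIC's #interrupt-cells = <3>. Consumers are unchanged by this; a hypothetical driver excerpt (names illustrative) shows how little of it they see:

	#include <linux/platform_device.h>

	static int sead3_uart_irq_example(struct platform_device *pdev)
	{
		/*
		 * The three-cell specifier is decoded by the GIC's irqdomain;
		 * the driver receives a plain Linux IRQ number with the
		 * level-high trigger already configured.
		 */
		return platform_get_irq(pdev, 0);	/* first "interrupts" entry */
	}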
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 320772caf054..92fca3c42eac 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -3,7 +3,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
@@ -41,7 +40,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
@@ -86,7 +84,6 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -99,8 +96,6 @@ CONFIG_FIXED_PHY=y
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_CPMAC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
@@ -142,7 +137,6 @@ CONFIG_BSD_DISKLABEL=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
CONFIG_CRYPTO=y
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 134879c1310a..25ed914933e5 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -7,7 +7,6 @@ CONFIG_ATH79_MACH_PB44=y
CONFIG_ATH79_MACH_UBNT_XM=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
@@ -35,7 +34,6 @@ CONFIG_IP_ADVANCED_ROUTER=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_CFG80211=m
CONFIG_MAC80211=m
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index 5599a9f1e3c6..131b350f014f 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -5,7 +5,6 @@ CONFIG_BCM63XX_CPU_6348=y
CONFIG_BCM63XX_CPU_6358=y
CONFIG_NO_HZ=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_TINY_RCU=y
@@ -33,7 +32,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CFG80211=y
@@ -50,13 +48,10 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_BCM63XX_PHY=y
CONFIG_NET_ETHERNET=y
CONFIG_BCM63XX_ENET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_B43=y
# CONFIG_B43_PHY_LP is not set
# CONFIG_INPUT is not set
@@ -70,7 +65,6 @@ CONFIG_SERIAL_BCM63XX_CONSOLE=y
# CONFIG_HWMON is not set
# CONFIG_VGA_ARB is not set
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_OHCI_HCD=y
@@ -84,7 +78,6 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_PROC_KCORE=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index d20b09d77b53..a55009edbb29 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -4,7 +4,6 @@ CONFIG_SMP=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -60,7 +59,6 @@ CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
-# CONFIG_INET_LRO is not set
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -182,7 +180,6 @@ CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
@@ -284,7 +281,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig
index acf7785c4cdb..a7072a14d396 100644
--- a/arch/mips/configs/bmips_be_defconfig
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -23,7 +23,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
index 2924ba34a01b..bd80b5c852dd 100644
--- a/arch/mips/configs/capcella_defconfig
+++ b/arch/mips/configs/capcella_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_VR41XX=y
CONFIG_ZAO_CAPCELLA=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -46,8 +45,6 @@ CONFIG_SMSC_PHY=m
CONFIG_NET_ETHERNET=y
CONFIG_NET_PCI=y
CONFIG_8139TOO=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -59,7 +56,6 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
CONFIG_GPIO_VR41XX=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_VR41XX=y
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index d4fda41f00ba..e5b18f1a31a0 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -42,7 +42,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 43e0ba24470c..b42cfa7865f9 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -41,7 +41,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 23b66934e18d..a9066f300665 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -1,5 +1,4 @@
CONFIG_MIPS_COBALT=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
@@ -15,17 +14,14 @@ CONFIG_XFRM_USER=y
CONFIG_NET_KEY=y
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_RAID_ATTRS=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -36,8 +32,6 @@ CONFIG_NET_ETHERNET=y
CONFIG_NET_TULIP=y
CONFIG_DE2104X=y
CONFIG_TULIP=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -56,7 +50,6 @@ CONFIG_FB_COBALT=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_HID=m
CONFIG_USB=m
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_OHCI_HCD=m
@@ -84,6 +77,5 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRC16=y
CONFIG_LIBCRC32C=y
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 2b6cb41d5715..e149f78901f8 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_DECSTATION=y
CONFIG_CPU_R3000=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
index e94d266c4b97..c3ac0209457c 100644
--- a/arch/mips/configs/e55_defconfig
+++ b/arch/mips/configs/e55_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_VR41XX=y
CONFIG_CASIO_E55=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -28,7 +27,6 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
CONFIG_GPIO_VR41XX=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_VR41XX=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 87435897fd50..499f51498ecb 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -3,7 +3,6 @@ CONFIG_64BIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-fuloong2e"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
@@ -47,7 +46,6 @@ CONFIG_NET_IPGRE=m
CONFIG_NET_IPGRE_BROADCAST=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
@@ -79,7 +77,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
@@ -88,7 +85,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -101,7 +97,6 @@ CONFIG_NET_9P=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_FW_LOADER=m
CONFIG_MTD=m
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
CONFIG_MTD_JEDECPROBE=m
@@ -163,7 +158,6 @@ CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_VIAPRO=m
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_RADEON=y
# CONFIG_FB_RADEON_I2C is not set
@@ -184,7 +178,6 @@ CONFIG_USB_KBD=y
CONFIG_USB_MOUSE=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OTG_WHITELIST=y
CONFIG_USB_WUSB_CBAF=m
CONFIG_USB_C67X00_HCD=m
@@ -201,7 +194,6 @@ CONFIG_USB_TMC=m
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_USB_SEVSEG=m
CONFIG_USB_ISIGHTFW=m
CONFIG_UIO=m
@@ -215,7 +207,6 @@ CONFIG_EXT4_FS=m
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_REISERFS_FS=m
-CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=m
@@ -256,8 +247,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_CCM=m
diff --git a/arch/mips/configs/generic/board-boston.config b/arch/mips/configs/generic/board-boston.config
new file mode 100644
index 000000000000..19560a45b683
--- /dev/null
+++ b/arch/mips/configs/generic/board-boston.config
@@ -0,0 +1,48 @@
+CONFIG_FIT_IMAGE_FDT_BOSTON=y
+
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+
+CONFIG_AUXDISPLAY=y
+CONFIG_IMG_ASCII_LCD=y
+
+CONFIG_COMMON_CLK_BOSTON=y
+
+CONFIG_DMADEVICES=y
+CONFIG_PCH_DMA=y
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PCH=y
+
+CONFIG_I2C=y
+CONFIG_I2C_EG20T=y
+
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+
+CONFIG_NETDEVICES=y
+CONFIG_PCH_GBE=y
+
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_XILINX=y
+
+CONFIG_PCH_PHUB=y
+
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_SPI=y
+CONFIG_SPI_TOPCLIFF_PCH=y
+
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
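This fragment only switches on drivers for the devices that boston.dts describes (AHCI SATA, the EG20T PCH peripherals, the Xilinx PCIe hosts, the M41T80-family RTC and so on). Assuming the BOARDS= merge mechanism of the MIPS generic platform from this same series, which overlays board-*.config fragments onto the generic defconfig, a Boston kernel would be configured with something like make ARCH=mips 32r2el_defconfig BOARDS=boston.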
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index e24feb0633aa..b1911816337c 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -2,7 +2,6 @@ CONFIG_MIPS_ALCHEMY=y
CONFIG_MIPS_GPR=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -59,7 +58,6 @@ CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
@@ -68,7 +66,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -166,7 +163,6 @@ CONFIG_YAM=m
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -200,8 +196,6 @@ CONFIG_SMSC_PHY=m
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MIPS_AU1X00_ENET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_ATH_COMMON=y
CONFIG_ATH_DEBUG=y
CONFIG_ATH5K=y
@@ -286,14 +280,12 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB_KBD=m
CONFIG_USB_MOUSE=m
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=m
-CONFIG_USB_LIBUSUAL=y
CONFIG_USB_SERIAL=y
CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index ec8e9684296d..83e8fe2064aa 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -4,7 +4,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -46,7 +45,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -139,7 +137,6 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
@@ -148,7 +145,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
@@ -163,7 +159,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -174,7 +169,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -215,7 +209,6 @@ CONFIG_RFKILL=m
CONFIG_CONNECTOR=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -245,8 +238,6 @@ CONFIG_MDIO_BITBANG=m
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=m
CONFIG_SGISEEQ=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_HOSTAP=m
CONFIG_INPUT_MOUSEDEV=m
CONFIG_MOUSE_PS2=m
@@ -286,7 +277,6 @@ CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
@@ -355,7 +345,6 @@ CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_KEYS=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_NULL=m
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index e582069b44fd..a0d593248668 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -5,7 +5,6 @@ CONFIG_SMP=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
@@ -104,7 +103,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_OSD=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -325,7 +323,6 @@ CONFIG_XFS_POSIX_ACL=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_FSCACHE=m
@@ -342,7 +339,6 @@ CONFIG_NFS_V3=y
CONFIG_RPCSEC_GSS_KRB5=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_DLM=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_KEYS=y
CONFIG_SECURITYFS=y
CONFIG_CRYPTO_FIPS=y
@@ -378,7 +374,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_DEV_HIFN_795X=m
CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index 4dbf6269b3f9..d0a4c2cfacf8 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -1,7 +1,6 @@
CONFIG_SGI_IP28=y
CONFIG_ARC_CONSOLE=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -35,10 +34,8 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_TCP_MD5SIG=y
# CONFIG_IPV6 is not set
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
@@ -48,8 +45,6 @@ CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NET_ETHERNET=y
CONFIG_SGISEEQ=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_SYNAPTICS is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index f9af98f63cff..1e26e58b9dc3 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -1,6 +1,5 @@
CONFIG_SGI_IP32=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -38,7 +37,6 @@ CONFIG_NET_IPGRE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_MD5SIG=y
CONFIG_INET6_AH=m
@@ -76,8 +74,6 @@ CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
CONFIG_TULIP_MMIO=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_SERIO_I8042 is not set
CONFIG_SERIO_MACEPS2=y
@@ -87,7 +83,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_GBE=y
@@ -117,7 +112,6 @@ CONFIG_EXT3_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
@@ -178,8 +172,6 @@ CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_KEYS=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_CBC=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 3019fce63cd3..9ad1c94376c8 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -1,7 +1,6 @@
CONFIG_MACH_JAZZ=y
CONFIG_OLIVETTI_M700=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -85,7 +84,6 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
@@ -94,7 +92,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
@@ -109,7 +106,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -120,7 +116,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -276,7 +271,6 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index 9bc08f275120..af12281a5c33 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -18,23 +18,18 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_PCI=y
CONFIG_TC35815=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -58,5 +53,3 @@ CONFIG_PROC_KCORE=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index e620a2c3eba4..947a35c7c46c 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -5,7 +5,6 @@ CONFIG_DS1603=y
CONFIG_LASAT_SYSCTL=y
CONFIG_HZ_1000=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
@@ -31,7 +30,6 @@ CONFIG_INET=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -44,8 +42,6 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_PCI=y
CONFIG_PCNET32=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -56,7 +52,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 8df80c6383f2..1ec8ed8d05d1 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -7,7 +7,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -83,8 +82,6 @@ CONFIG_NET_SCHED=y
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -142,7 +139,6 @@ CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_R8169=y
CONFIG_R8169_VLAN=y
-# CONFIG_NETDEV_10000 is not set
CONFIG_USB_USBNET=m
CONFIG_USB_NET_CDC_EEM=m
CONFIG_NETCONSOLE=m
@@ -205,7 +201,6 @@ CONFIG_USB_ZR364XX=m
CONFIG_USB_STKWEBCAM=m
CONFIG_USB_S2255=m
# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
@@ -290,7 +285,6 @@ CONFIG_HID_WACOM=m
CONFIG_HID_ZEROPLUS=m
CONFIG_ZEROPLUS_FF=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_DYNAMIC_MINORS=y
CONFIG_USB_OTG_WHITELIST=y
CONFIG_USB_MON=y
@@ -313,10 +307,8 @@ CONFIG_USB_STORAGE_SDDR09=m
CONFIG_USB_STORAGE_SDDR55=m
CONFIG_USB_STORAGE_JUMPSHOT=m
CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_LIBUSUAL=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_LED=m
CONFIG_USB_GADGET=m
CONFIG_USB_GADGET_M66592=y
CONFIG_MMC=m
@@ -341,7 +333,6 @@ CONFIG_XFS_POSIX_ACL=y
CONFIG_BTRFS_FS=m
CONFIG_QUOTA=y
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
@@ -407,8 +398,6 @@ CONFIG_PRINTK_TIME=y
CONFIG_FRAME_WARN=1024
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_KEYS=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_NULL=m
@@ -446,6 +435,5 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_T10DIF=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 7f95c4b3ab2c..324dfee23dfb 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -95,7 +95,6 @@ CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -252,7 +251,6 @@ CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB_RADEON=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
@@ -335,7 +333,6 @@ CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_RCU_CPU_STALL_VERBOSE is not set
# CONFIG_FTRACE is not set
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index e233f878afef..80ecd94ed126 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -133,7 +133,6 @@ CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index fbe085c328ab..35ad1f8d1a79 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index cbf37dd0c490..77145ecaa23b 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -42,7 +42,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 35f6ba260df8..cc2687cfdc13 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -43,7 +43,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -135,7 +134,6 @@ CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MATROX=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 900f14543eeb..55b68b981b05 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -46,7 +46,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 8e2738b5e180..5ca590cf1635 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -47,7 +47,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -140,7 +139,6 @@ CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MATROX=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 6dc4e309a691..7ea7c0ba2666 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -42,7 +42,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -134,7 +133,6 @@ CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MATROX=y
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 0f08e4623ee4..43ce6576ab1c 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -1,7 +1,6 @@
CONFIG_NEC_MARKEINS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -92,7 +91,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
@@ -117,7 +115,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -125,7 +122,6 @@ CONFIG_IP6_NF_RAW=m
CONFIG_FW_LOADER=m
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/mips_paravirt_defconfig b/arch/mips/configs/mips_paravirt_defconfig
index 84cfcb4bf2ea..accf0db1dc6f 100644
--- a/arch/mips/configs/mips_paravirt_defconfig
+++ b/arch/mips/configs/mips_paravirt_defconfig
@@ -39,7 +39,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index a2c045fab6c5..3486b034f726 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_VR41XX=y
CONFIG_VICTOR_MPC30X=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
@@ -31,8 +30,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_PATA_LEGACY=y
CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_USB_PEGASUS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -45,13 +42,11 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
CONFIG_GPIO_VR41XX=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB=m
CONFIG_USB_OHCI_HCD=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_VR41XX=y
CONFIG_EXT2_FS=y
-CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_CONFIGFS_FS=m
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index 201edfb2637d..3c8c16b10732 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -2,7 +2,6 @@ CONFIG_PMC_MSP=y
CONFIG_PMC_MSP7120_GW=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_PREEMPT=y
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-pmc"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -38,7 +37,6 @@ CONFIG_BRIDGE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index f3f60056bc27..4011f1869e72 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1,7 +1,6 @@
CONFIG_MIPS_ALCHEMY=y
CONFIG_MIPS_MTX1=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -81,7 +80,6 @@ CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
@@ -90,7 +88,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -98,7 +95,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -108,7 +104,6 @@ CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -225,8 +220,6 @@ CONFIG_TOSHIBA_FIR=m
CONFIG_VLSI_FIR=m
CONFIG_MCS_FIR=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -246,7 +239,6 @@ CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
CONFIG_CONNECTOR=m
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -257,7 +249,6 @@ CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_SG=m
@@ -596,7 +587,6 @@ CONFIG_USB_STORAGE_SDDR55=m
CONFIG_USB_STORAGE_JUMPSHOT=m
CONFIG_USB_STORAGE_ALAUDA=m
CONFIG_USB_STORAGE_KARMA=m
-CONFIG_USB_LIBUSUAL=y
CONFIG_USB_MDC800=m
CONFIG_USB_MICROTEK=m
CONFIG_USB_SERIAL=m
@@ -640,7 +630,6 @@ CONFIG_USB_ADUTUX=m
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
CONFIG_USB_IDMOUSE=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index 07d01827a973..5720ce23e9aa 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -6,7 +6,6 @@ CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_SMP=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -183,14 +182,12 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
@@ -317,7 +314,6 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -607,7 +603,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_CCITT=m
CONFIG_CRC7=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index f59969acb724..fea56c535d92 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -7,7 +7,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_KEXEC=y
-CONFIG_EXPERIMENTAL=y
CONFIG_CROSS_COMPILE=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
@@ -163,7 +162,6 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -171,7 +169,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
@@ -186,7 +183,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -197,7 +193,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -308,7 +303,6 @@ CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=y
-CONFIG_MISC_DEVICES=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -369,7 +363,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_UIO=y
CONFIG_UIO_PDRV=m
@@ -522,7 +515,6 @@ CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_KGDB=y
@@ -568,7 +560,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_CCITT=m
CONFIG_CRC7=m
diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
index c887066ecc2a..81b5eb89446c 100644
--- a/arch/mips/configs/pnx8335_stb225_defconfig
+++ b/arch/mips/configs/pnx8335_stb225_defconfig
@@ -5,7 +5,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_128=y
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -27,12 +26,10 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_INET_AH=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,15 +38,12 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
@@ -94,4 +88,3 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index d7bb8cce1068..3f1333517405 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -34,7 +34,6 @@ CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
@@ -109,7 +108,6 @@ CONFIG_USB_GADGET_DEBUG=y
CONFIG_USB_ETH=y
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_JZ4740=y
CONFIG_RTC_CLASS=y
@@ -183,7 +181,6 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_FTRACE is not set
CONFIG_KGDB=y
CONFIG_RUNTIME_DEBUG=y
-CONFIG_CRYPTO_ZLIB=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_FONTS=y
CONFIG_FONT_SUN8x16=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 5d9d708e12e5..6fa56c6e53f5 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -3,7 +3,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -39,7 +38,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_CUBIC=m
@@ -114,7 +112,6 @@ CONFIG_NET_CLS_IND=y
CONFIG_HAMRADIO=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_BLOCK2MTD=y
CONFIG_MTD_NAND=y
@@ -129,8 +126,6 @@ CONFIG_NET_ETHERNET=y
CONFIG_KORINA=y
CONFIG_NET_PCI=y
CONFIG_VIA_RHINE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_ATMEL=m
CONFIG_PPP=m
CONFIG_PPP_MULTILINK=y
@@ -183,7 +178,6 @@ CONFIG_BSD_DISKLABEL=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ZLIB=y
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC16=m
CONFIG_LIBCRC32C=m
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 43d55e5abacb..fb195e29e449 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -31,12 +31,10 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=m
CONFIG_MTD_BLOCK_RO=m
CONFIG_MTD_CFI=y
@@ -50,7 +48,6 @@ CONFIG_MTD_NAND_TXX9NDFMC=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE_TX4938=y
CONFIG_BLK_DEV_IDE_TX4939=y
@@ -60,8 +57,6 @@ CONFIG_SMC91X=y
CONFIG_NE2000=y
CONFIG_NET_PCI=y
CONFIG_TC35815=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -108,5 +103,3 @@ CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index c2b4e3f33a73..99679e514042 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -3,7 +3,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_ARC_CONSOLE=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -94,7 +93,6 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
@@ -103,7 +101,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
@@ -118,7 +115,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -129,7 +125,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -214,7 +209,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_UB=m
CONFIG_BLK_DEV_RAM=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
@@ -353,7 +347,6 @@ CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
CONFIG_USB_CYTHERM=m
CONFIG_USB_SISUSBVGA=m
CONFIG_USB_LD=m
@@ -366,7 +359,6 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index d14ae2fa7d13..c695b7b1c4ae 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -5,7 +5,6 @@ CONFIG_CPU_MIPS32_R2=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index 7fca09fedb59..c724bdd6a7e6 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -4,7 +4,6 @@ CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_CGROUPS=y
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index 11f51505d562..4041597e3170 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_VR41XX=y
CONFIG_TANBAC_TB0219=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -31,7 +30,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -40,7 +38,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_XIP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_PHYLIB=m
CONFIG_MARVELL_PHY=m
@@ -57,7 +54,6 @@ CONFIG_VIA_RHINE=y
CONFIG_VIA_RHINE_MMIO=y
CONFIG_R8169=y
CONFIG_VIA_VELOCITY=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -70,7 +66,6 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
CONFIG_GPIO_TB0219=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB=m
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
@@ -91,6 +86,5 @@ CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index 9327b3af32cd..565f0441c50d 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_VR41XX=y
CONFIG_TANBAC_TB0226=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -29,7 +28,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -37,7 +35,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_XIP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
@@ -49,8 +46,6 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_PCI=y
CONFIG_E100=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -66,7 +61,6 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
@@ -87,7 +81,6 @@ CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=32M console=ttyVR0,115200"
CONFIG_CRC32=m
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
index a967289b7970..a702be602fb9 100644
--- a/arch/mips/configs/tb0287_defconfig
+++ b/arch/mips/configs/tb0287_defconfig
@@ -1,5 +1,4 @@
CONFIG_MACH_VR41XX=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -31,7 +30,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=y
CONFIG_TCP_CONG_CUBIC=m
@@ -43,7 +41,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_XIP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SCAN_ASYNC=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -64,7 +61,6 @@ CONFIG_VIA_RHINE=y
CONFIG_VIA_RHINE_MMIO=y
CONFIG_R8169=y
CONFIG_VIA_VELOCITY=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -76,7 +72,6 @@ CONFIG_SERIAL_VR41XX_CONSOLE=y
CONFIG_GPIO_VR41XX=y
# CONFIG_HWMON is not set
CONFIG_MFD_SM501=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_SM501=y
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index ee4b2be43c44..a84eac409c9c 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -1,6 +1,5 @@
CONFIG_MACH_VR41XX=y
CONFIG_IBM_WORKPAD=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -28,13 +27,10 @@ CONFIG_IP_MULTICAST=y
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_BLK_DEV_RAM=m
-# CONFIG_MISC_DEVICES is not set
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=m
CONFIG_IDE_GENERIC=y
CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_NET_PCMCIA=y
CONFIG_PCMCIA_3C589=m
CONFIG_PCMCIA_3C574=m
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
index a606b3f9196c..51ffbbaddee2 100644
--- a/arch/mips/generic/Kconfig
+++ b/arch/mips/generic/Kconfig
@@ -9,11 +9,31 @@ config LEGACY_BOARDS
 	  kernel is booted without being provided with an FDT via the UHI
 	  boot protocol.
 
+config YAMON_DT_SHIM
+	bool
+	help
+	  Select this from your board if the board uses the YAMON bootloader
+	  and you wish to include code which helps translate various
+	  YAMON-provided environment variables into device tree properties.
+
+comment "Legacy (non-UHI/non-FIT) Boards"
+
 config LEGACY_BOARD_SEAD3
 	bool "Support MIPS SEAD-3 boards"
 	select LEGACY_BOARDS
+	select YAMON_DT_SHIM
 	help
 	  Enable this to include support for booting on MIPS SEAD-3 FPGA-based
 	  development boards, which boot using a legacy boot protocol.
 
+comment "FIT/UHI Boards"
+
+config FIT_IMAGE_FDT_BOSTON
+	bool "Include FDT for MIPS Boston boards"
+	help
+	  Enable this to include the FDT for the MIPS Boston development board
+	  from Imagination Technologies in the FIT kernel image. You should
+	  enable this if you wish to boot on a MIPS Boston board, as it is
+	  expected by the bootloader.
+
 endif
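The yamon-dt shim being added here centralises fixups like the append_memory() body deleted from board-sead3.c further down: read YAMON's memsize environment variable and turn it into /memory node properties. A condensed sketch of that translation, following the logic visible in the removed code (error handling and the linux,usable-memory handling trimmed; the function name is illustrative):

	#include <linux/libfdt.h>
	#include <asm/fw/fw.h>

	static __init int yamon_memsize_to_fdt(void *fdt)
	{
		unsigned long memsize = 32 << 20;	/* fallback, as in the old code */
		char *var = fw_getenv("memsize");
		__be32 reg[2];
		int mem_off;

		if (var && kstrtoul(var, 0, &memsize))
			return -EINVAL;

		mem_off = fdt_path_offset(fdt, "/memory");
		if (mem_off == -FDT_ERR_NOTFOUND)
			mem_off = fdt_add_subnode(fdt, 0, "memory");
		if (mem_off < 0)
			return mem_off;

		reg[0] = 0;			/* region base */
		reg[1] = cpu_to_be32(memsize);	/* region size */
		return fdt_setprop(fdt, mem_off, "reg", reg, sizeof(reg));
	}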
diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile
index acb9b6d62b16..56b3ea565ed9 100644
--- a/arch/mips/generic/Makefile
+++ b/arch/mips/generic/Makefile
@@ -12,5 +12,6 @@ obj-y += init.o
obj-y += irq.o
obj-y += proc.o
+obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o
obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o
obj-$(CONFIG_KEXEC) += kexec.o
diff --git a/arch/mips/generic/board-sead3.c b/arch/mips/generic/board-sead3.c
index f4ae0584a33b..f109a6b9fdd0 100644
--- a/arch/mips/generic/board-sead3.c
+++ b/arch/mips/generic/board-sead3.c
@@ -13,10 +13,12 @@
 #include <linux/errno.h>
 #include <linux/libfdt.h>
 #include <linux/printk.h>
+#include <linux/sizes.h>
 
 #include <asm/fw/fw.h>
 #include <asm/io.h>
 #include <asm/machine.h>
+#include <asm/yamon-dt.h>
 
 #define SEAD_CONFIG			CKSEG1ADDR(0x1b100110)
 #define SEAD_CONFIG_GIC_PRESENT		BIT(1)
@@ -25,6 +27,15 @@
 #define MIPS_REVISION_MACHINE		(0xf << 4)
 #define MIPS_REVISION_MACHINE_SEAD3	(0x4 << 4)
 
+/*
+ * Maximum 384MB RAM at physical address 0, preceding any I/O.
+ */
+static struct yamon_mem_region mem_regions[] __initdata = {
+	/* start	size */
+	{ 0,		SZ_256M + SZ_128M },
+	{}
+};
+
 static __init bool sead3_detect(void)
 {
 	uint32_t rev;
@@ -33,96 +44,9 @@ static __init bool sead3_detect(void)
return (rev & MIPS_REVISION_MACHINE) == MIPS_REVISION_MACHINE_SEAD3;
}
-static __init int append_cmdline(void *fdt)
-{
- int err, chosen_off;
-
- /* find or add chosen node */
- chosen_off = fdt_path_offset(fdt, "/chosen");
- if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_add_subnode(fdt, 0, "chosen");
- if (chosen_off < 0) {
- pr_err("Unable to find or add DT chosen node: %d\n",
- chosen_off);
- return chosen_off;
- }
-
- err = fdt_setprop_string(fdt, chosen_off, "bootargs", fw_getcmdline());
- if (err) {
- pr_err("Unable to set bootargs property: %d\n", err);
- return err;
- }
-
- return 0;
-}
-
static __init int append_memory(void *fdt)
{
- unsigned long phys_memsize, memsize;
- __be32 mem_array[2];
- int err, mem_off;
- char *var;
-
- /* find memory size from the bootloader environment */
- var = fw_getenv("memsize");
- if (var) {
- err = kstrtoul(var, 0, &phys_memsize);
- if (err) {
- pr_err("Failed to read memsize env variable '%s'\n",
- var);
- return -EINVAL;
- }
- } else {
- pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
- phys_memsize = 32 << 20;
- }
-
- /* default to using all available RAM */
- memsize = phys_memsize;
-
- /* allow the user to override the usable memory */
- var = strstr(arcs_cmdline, "memsize=");
- if (var)
- memsize = memparse(var + strlen("memsize="), NULL);
-
- /* if the user says there's more RAM than we thought, believe them */
- phys_memsize = max_t(unsigned long, phys_memsize, memsize);
-
- /* find or add a memory node */
- mem_off = fdt_path_offset(fdt, "/memory");
- if (mem_off == -FDT_ERR_NOTFOUND)
- mem_off = fdt_add_subnode(fdt, 0, "memory");
- if (mem_off < 0) {
- pr_err("Unable to find or add memory DT node: %d\n", mem_off);
- return mem_off;
- }
-
- err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
- if (err) {
- pr_err("Unable to set memory node device_type: %d\n", err);
- return err;
- }
-
- mem_array[0] = 0;
- mem_array[1] = cpu_to_be32(phys_memsize);
- err = fdt_setprop(fdt, mem_off, "reg", mem_array, sizeof(mem_array));
- if (err) {
- pr_err("Unable to set memory regs property: %d\n", err);
- return err;
- }
-
- mem_array[0] = 0;
- mem_array[1] = cpu_to_be32(memsize);
- err = fdt_setprop(fdt, mem_off, "linux,usable-memory",
- mem_array, sizeof(mem_array));
- if (err) {
- pr_err("Unable to set linux,usable-memory property: %d\n", err);
- return err;
- }
-
- return 0;
+ return yamon_dt_append_memory(fdt, mem_regions);
}
static __init int remove_gic(void *fdt)
@@ -163,14 +87,16 @@ static __init int remove_gic(void *fdt)
return -EINVAL;
}
- err = fdt_setprop_u32(fdt, 0, "interrupt-parent", cpu_phandle);
- if (err) {
- pr_err("unable to set root interrupt-parent: %d\n", err);
- return err;
- }
-
uart_off = fdt_node_offset_by_compatible(fdt, -1, "ns16550a");
while (uart_off >= 0) {
+ err = fdt_setprop_u32(fdt, uart_off, "interrupt-parent",
+ cpu_phandle);
+ if (err) {
+ pr_warn("unable to set UART interrupt-parent: %d\n",
+ err);
+ return err;
+ }
+
err = fdt_setprop_u32(fdt, uart_off, "interrupts",
cpu_uart_int);
if (err) {
@@ -193,6 +119,12 @@ static __init int remove_gic(void *fdt)
return eth_off;
}
+ err = fdt_setprop_u32(fdt, eth_off, "interrupt-parent", cpu_phandle);
+ if (err) {
+ pr_err("unable to set ethernet interrupt-parent: %d\n", err);
+ return err;
+ }
+
err = fdt_setprop_u32(fdt, eth_off, "interrupts", cpu_eth_int);
if (err) {
pr_err("unable to set ethernet interrupts property: %d\n", err);
@@ -205,94 +137,29 @@ static __init int remove_gic(void *fdt)
return ehci_off;
}
- err = fdt_setprop_u32(fdt, ehci_off, "interrupts", cpu_ehci_int);
+ err = fdt_setprop_u32(fdt, ehci_off, "interrupt-parent", cpu_phandle);
if (err) {
- pr_err("unable to set EHCI interrupts property: %d\n", err);
+ pr_err("unable to set EHCI interrupt-parent: %d\n", err);
return err;
}
- return 0;
-}
-
-static __init int serial_config(void *fdt)
-{
- const char *yamontty, *mode_var;
- char mode_var_name[9], path[18], parity;
- unsigned int uart, baud, stop_bits;
- bool hw_flow;
- int chosen_off, err;
-
- yamontty = fw_getenv("yamontty");
- if (!yamontty || !strcmp(yamontty, "tty0")) {
- uart = 0;
- } else if (!strcmp(yamontty, "tty1")) {
- uart = 1;
- } else {
- pr_warn("yamontty environment variable '%s' invalid\n",
- yamontty);
- uart = 0;
- }
-
- baud = stop_bits = 0;
- parity = 0;
- hw_flow = false;
-
- snprintf(mode_var_name, sizeof(mode_var_name), "modetty%u", uart);
- mode_var = fw_getenv(mode_var_name);
- if (mode_var) {
- while (mode_var[0] >= '0' && mode_var[0] <= '9') {
- baud *= 10;
- baud += mode_var[0] - '0';
- mode_var++;
- }
- if (mode_var[0] == ',')
- mode_var++;
- if (mode_var[0])
- parity = mode_var[0];
- if (mode_var[0] == ',')
- mode_var++;
- if (mode_var[0])
- stop_bits = mode_var[0] - '0';
- if (mode_var[0] == ',')
- mode_var++;
- if (!strcmp(mode_var, "hw"))
- hw_flow = true;
- }
-
- if (!baud)
- baud = 38400;
-
- if (parity != 'e' && parity != 'n' && parity != 'o')
- parity = 'n';
-
- if (stop_bits != 7 && stop_bits != 8)
- stop_bits = 8;
-
- WARN_ON(snprintf(path, sizeof(path), "uart%u:%u%c%u%s",
- uart, baud, parity, stop_bits,
- hw_flow ? "r" : "") >= sizeof(path));
-
- /* find or add chosen node */
- chosen_off = fdt_path_offset(fdt, "/chosen");
- if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_add_subnode(fdt, 0, "chosen");
- if (chosen_off < 0) {
- pr_err("Unable to find or add DT chosen node: %d\n",
- chosen_off);
- return chosen_off;
- }
-
- err = fdt_setprop_string(fdt, chosen_off, "stdout-path", path);
+ err = fdt_setprop_u32(fdt, ehci_off, "interrupts", cpu_ehci_int);
if (err) {
- pr_err("Unable to set stdout-path property: %d\n", err);
+ pr_err("unable to set EHCI interrupts property: %d\n", err);
return err;
}
return 0;
}
+static const struct mips_fdt_fixup sead3_fdt_fixups[] __initconst = {
+ { yamon_dt_append_cmdline, "append command line" },
+ { append_memory, "append memory" },
+ { remove_gic, "remove GIC when not present" },
+ { yamon_dt_serial_config, "append serial configuration" },
+ { },
+};
+
static __init const void *sead3_fixup_fdt(const void *fdt,
const void *match_data)
{
@@ -307,29 +174,10 @@ static __init const void *sead3_fixup_fdt(const void *fdt,
fw_init_cmdline();
- err = fdt_open_into(fdt, fdt_buf, sizeof(fdt_buf));
- if (err)
- panic("Unable to open FDT: %d", err);
-
- err = append_cmdline(fdt_buf);
- if (err)
- panic("Unable to patch FDT: %d", err);
-
- err = append_memory(fdt_buf);
- if (err)
- panic("Unable to patch FDT: %d", err);
-
- err = remove_gic(fdt_buf);
- if (err)
- panic("Unable to patch FDT: %d", err);
-
- err = serial_config(fdt_buf);
- if (err)
- panic("Unable to patch FDT: %d", err);
-
- err = fdt_pack(fdt_buf);
+ err = apply_mips_fdt_fixups(fdt_buf, sizeof(fdt_buf),
+ fdt, sead3_fdt_fixups);
if (err)
- panic("Unable to pack FDT: %d\n", err);
+ panic("Unable to fixup FDT: %d", err);
return fdt_buf;
}
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 1231b5a17b37..3f32b376d30e 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -122,6 +122,33 @@ void __init device_tree_init(void)
err = register_up_smp_ops();
}
+int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
+ const void *fdt_in,
+ const struct mips_fdt_fixup *fixups)
+{
+ int err;
+
+ err = fdt_open_into(fdt_in, fdt_out, fdt_out_size);
+ if (err) {
+ pr_err("Failed to open FDT\n");
+ return err;
+ }
+
+ for (; fixups->apply; fixups++) {
+ err = fixups->apply(fdt_out);
+ if (err) {
+ pr_err("Failed to apply FDT fixup \"%s\"\n",
+ fixups->description);
+ return err;
+ }
+ }
+
+ err = fdt_pack(fdt_out);
+ if (err)
+ pr_err("Failed to pack FDT\n");
+ return err;
+}
+
void __init plat_time_init(void)
{
struct device_node *np;
diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
index f67fbf1c8541..3390e2f80b80 100644
--- a/arch/mips/generic/vmlinux.its.S
+++ b/arch/mips/generic/vmlinux.its.S
@@ -29,3 +29,28 @@
};
};
};
+
+#ifdef CONFIG_FIT_IMAGE_FDT_BOSTON
+/ {
+ images {
+ fdt@boston {
+ description = "img,boston Device Tree";
+ data = /incbin/("boot/dts/img/boston.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash@0 {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf@boston {
+ description = "Boston Linux kernel";
+ kernel = "kernel@0";
+ fdt = "fdt@boston";
+ };
+ };
+};
+#endif /* CONFIG_FIT_IMAGE_FDT_BOSTON */
diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c
new file mode 100644
index 000000000000..6077bca9b364
--- /dev/null
+++ b/arch/mips/generic/yamon-dt.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "yamon-dt: " fmt
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/printk.h>
+
+#include <asm/fw/fw.h>
+#include <asm/yamon-dt.h>
+
+#define MAX_MEM_ARRAY_ENTRIES 2
+
+__init int yamon_dt_append_cmdline(void *fdt)
+{
+ int err, chosen_off;
+
+ /* find or add chosen node */
+ chosen_off = fdt_path_offset(fdt, "/chosen");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_path_offset(fdt, "/chosen@0");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+ if (chosen_off < 0) {
+ pr_err("Unable to find or add DT chosen node: %d\n",
+ chosen_off);
+ return chosen_off;
+ }
+
+ err = fdt_setprop_string(fdt, chosen_off, "bootargs", fw_getcmdline());
+ if (err) {
+ pr_err("Unable to set bootargs property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned int __init gen_fdt_mem_array(
+ const struct yamon_mem_region *regions,
+ __be32 *mem_array,
+ unsigned int max_entries,
+ unsigned long memsize)
+{
+ const struct yamon_mem_region *mr;
+ unsigned long size;
+ unsigned int entries = 0;
+
+ for (mr = regions; mr->size && memsize; ++mr) {
+ if (entries >= max_entries) {
+ pr_warn("Number of regions exceeds max %u\n",
+ max_entries);
+ break;
+ }
+
+ /* How much of the remaining RAM fits in the next region? */
+ size = min_t(unsigned long, memsize, mr->size);
+ memsize -= size;
+
+ /* Emit a memory region */
+ *(mem_array++) = cpu_to_be32(mr->start);
+ *(mem_array++) = cpu_to_be32(size);
+ ++entries;
+
+ /* Discard the next mr->discard bytes */
+ memsize -= min_t(unsigned long, memsize, mr->discard);
+ }
+ return entries;
+}
+
+__init int yamon_dt_append_memory(void *fdt,
+ const struct yamon_mem_region *regions)
+{
+ unsigned long phys_memsize = 0, memsize;
+ __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
+ unsigned int mem_entries;
+ int i, err, mem_off;
+ char *var, param_name[10], *var_names[] = {
+ "ememsize", "memsize",
+ };
+
+ /* find memory size from the bootloader environment */
+ for (i = 0; i < ARRAY_SIZE(var_names); i++) {
+ var = fw_getenv(var_names[i]);
+ if (!var)
+ continue;
+
+ err = kstrtoul(var, 0, &phys_memsize);
+ if (!err)
+ break;
+
+ pr_warn("Failed to read the '%s' env variable '%s'\n",
+ var_names[i], var);
+ }
+
+ if (!phys_memsize) {
+ pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
+ phys_memsize = 32 << 20;
+ }
+
+ /* default to using all available RAM */
+ memsize = phys_memsize;
+
+ /* allow the user to override the usable memory */
+ for (i = 0; i < ARRAY_SIZE(var_names); i++) {
+ snprintf(param_name, sizeof(param_name), "%s=", var_names[i]);
+ var = strstr(arcs_cmdline, param_name);
+ if (!var)
+ continue;
+
+ memsize = memparse(var + strlen(param_name), NULL);
+ }
+
+ /* if the user says there's more RAM than we thought, believe them */
+ phys_memsize = max_t(unsigned long, phys_memsize, memsize);
+
+ /* find or add a memory node */
+ mem_off = fdt_path_offset(fdt, "/memory");
+ if (mem_off == -FDT_ERR_NOTFOUND)
+ mem_off = fdt_add_subnode(fdt, 0, "memory");
+ if (mem_off < 0) {
+ pr_err("Unable to find or add memory DT node: %d\n", mem_off);
+ return mem_off;
+ }
+
+ err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
+ if (err) {
+ pr_err("Unable to set memory node device_type: %d\n", err);
+ return err;
+ }
+
+ mem_entries = gen_fdt_mem_array(regions, mem_array,
+ MAX_MEM_ARRAY_ENTRIES, phys_memsize);
+ err = fdt_setprop(fdt, mem_off, "reg",
+ mem_array, mem_entries * 2 * sizeof(mem_array[0]));
+ if (err) {
+ pr_err("Unable to set memory regs property: %d\n", err);
+ return err;
+ }
+
+ mem_entries = gen_fdt_mem_array(regions, mem_array,
+ MAX_MEM_ARRAY_ENTRIES, memsize);
+ err = fdt_setprop(fdt, mem_off, "linux,usable-memory",
+ mem_array, mem_entries * 2 * sizeof(mem_array[0]));
+ if (err) {
+ pr_err("Unable to set linux,usable-memory property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+__init int yamon_dt_serial_config(void *fdt)
+{
+ const char *yamontty, *mode_var;
+ char mode_var_name[9], path[20], parity;
+ unsigned int uart, baud, stop_bits;
+ bool hw_flow;
+ int chosen_off, err;
+
+ yamontty = fw_getenv("yamontty");
+ if (!yamontty || !strcmp(yamontty, "tty0")) {
+ uart = 0;
+ } else if (!strcmp(yamontty, "tty1")) {
+ uart = 1;
+ } else {
+ pr_warn("yamontty environment variable '%s' invalid\n",
+ yamontty);
+ uart = 0;
+ }
+
+ baud = stop_bits = 0;
+ parity = 0;
+ hw_flow = false;
+
+ snprintf(mode_var_name, sizeof(mode_var_name), "modetty%u", uart);
+ mode_var = fw_getenv(mode_var_name);
+ if (mode_var) {
+ while (mode_var[0] >= '0' && mode_var[0] <= '9') {
+ baud *= 10;
+ baud += mode_var[0] - '0';
+ mode_var++;
+ }
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (mode_var[0])
+ parity = mode_var[0];
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (mode_var[0])
+ stop_bits = mode_var[0] - '0';
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (!strcmp(mode_var, "hw"))
+ hw_flow = true;
+ }
+
+ if (!baud)
+ baud = 38400;
+
+ if (parity != 'e' && parity != 'n' && parity != 'o')
+ parity = 'n';
+
+ if (stop_bits != 7 && stop_bits != 8)
+ stop_bits = 8;
+
+ WARN_ON(snprintf(path, sizeof(path), "serial%u:%u%c%u%s",
+ uart, baud, parity, stop_bits,
+ hw_flow ? "r" : "") >= sizeof(path));
+
+ /* find or add chosen node */
+ chosen_off = fdt_path_offset(fdt, "/chosen");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_path_offset(fdt, "/chosen@0");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+ if (chosen_off < 0) {
+ pr_err("Unable to find or add DT chosen node: %d\n",
+ chosen_off);
+ return chosen_off;
+ }
+
+ err = fdt_setprop_string(fdt, chosen_off, "stdout-path", path);
+ if (err) {
+ pr_err("Unable to set stdout-path property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 2535c7b4c482..7c8aab23bce8 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -12,6 +12,8 @@ generic-y += mm-arch-hooks.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf54bc7..da80878f2c0d 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
return __microMIPS_compute_return_epc(regs);
if (cpu_has_mips16)
return __MIPS16e_compute_return_epc(regs);
- return regs->cp0_epc;
- }
-
- if (!delay_slot(regs)) {
+ } else if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
}
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index b71ab4a5fd50..903f3bf48419 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -13,153 +13,107 @@
#include <asm/compiler.h>
#include <asm/war.h>
-static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
-{
- __u32 retval;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long dummy;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %3 # xchg_u32 \n"
- " .set mips0 \n"
- " move %2, %z4 \n"
- " .set arch=r4000 \n"
- " sc %2, %1 \n"
- " beqzl %2, 1b \n"
- " .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
- : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long dummy;
-
- do {
- __asm__ __volatile__(
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " ll %0, %3 # xchg_u32 \n"
- " .set mips0 \n"
- " move %2, %z4 \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " sc %2, %1 \n"
- " .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
- "=&r" (dummy)
- : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
- : "memory");
- } while (unlikely(!dummy));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- retval = *m;
- *m = val;
- raw_local_irq_restore(flags); /* implies memory barrier */
- }
-
- smp_llsc_mb();
-
- return retval;
-}
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
-#ifdef CONFIG_64BIT
-static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
-{
- __u64 retval;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long dummy;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %3 # xchg_u64 \n"
- " move %2, %z4 \n"
- " scd %2, %1 \n"
- " beqzl %2, 1b \n"
- " .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
- : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long dummy;
-
- do {
- __asm__ __volatile__(
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " lld %0, %3 # xchg_u64 \n"
- " move %2, %z4 \n"
- " scd %2, %1 \n"
- " .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
- "=&r" (dummy)
- : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
- : "memory");
- } while (unlikely(!dummy));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- retval = *m;
- *m = val;
- raw_local_irq_restore(flags); /* implies memory barrier */
- }
+/*
+ * These functions don't exist, so if they are called you'll either:
+ *
+ * - Get an error at compile-time due to __compiletime_error, if supported by
+ * your compiler.
+ *
+ * or:
+ *
+ * - Get an error at link-time due to the call to the missing function.
+ */
+extern unsigned long __cmpxchg_called_with_bad_pointer(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __xchg_called_with_bad_pointer(void)
+ __compiletime_error("Bad argument size for xchg");
- smp_llsc_mb();
+#define __xchg_asm(ld, st, m, val) \
+({ \
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set " MIPS_ISA_ARCH_LEVEL " \n" \
+ "1: " ld " %0, %2 # __xchg_asm \n" \
+ " .set mips0 \n" \
+ " move $1, %z3 \n" \
+ " .set " MIPS_ISA_ARCH_LEVEL " \n" \
+ " " st " $1, %1 \n" \
+ "\t" __scbeqz " $1, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
+ : "memory"); \
+ } else { \
+ unsigned long __flags; \
+ \
+ raw_local_irq_save(__flags); \
+ __ret = *m; \
+ *m = val; \
+ raw_local_irq_restore(__flags); \
+ } \
+ \
+ __ret; \
+})
- return retval;
-}
-#else
-extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
-#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
-#endif
+extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
+ unsigned int size);
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ int size)
{
switch (size) {
+ case 1:
+ case 2:
+ return __xchg_small(ptr, x, size);
+
case 4:
- return __xchg_u32(ptr, x);
+ return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);
+
case 8:
- return __xchg_u64(ptr, x);
- }
+ if (!IS_ENABLED(CONFIG_64BIT))
+ return __xchg_called_with_bad_pointer();
+
+ return __xchg_asm("lld", "scd", (volatile u64 *)ptr, x);
- return x;
+ default:
+ return __xchg_called_with_bad_pointer();
+ }
}
#define xchg(ptr, x) \
({ \
- BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
+ __typeof__(*(ptr)) __res; \
\
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+ smp_mb__before_llsc(); \
+ \
+ __res = (__typeof__(*(ptr))) \
+ __xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+ \
+ smp_llsc_mb(); \
+ \
+ __res; \
})
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " .set arch=r4000 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
- " bne %0, %z3, 2f \n" \
- " .set mips0 \n" \
- " move $1, %z4 \n" \
- " .set arch=r4000 \n" \
- " " st " $1, %1 \n" \
- " beqzl $1, 1b \n" \
- "2: \n" \
- " .set pop \n" \
- : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
- : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
- : "memory"); \
- } else if (kernel_uses_llsc) { \
+ if (kernel_uses_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
@@ -170,7 +124,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
- " beqz $1, 1b \n" \
+ "\t" __scbeqz " $1, 1b \n" \
" .set pop \n" \
"2: \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
@@ -189,44 +143,50 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
__ret; \
})
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern void __cmpxchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ switch (size) {
+ case 1:
+ case 2:
+ return __cmpxchg_small(ptr, old, new, size);
+
+ case 4:
+ return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+
+ case 8:
+ /* lld/scd are only available for MIPS64 */
+ if (!IS_ENABLED(CONFIG_64BIT))
+ return __cmpxchg_called_with_bad_pointer();
+
+ return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
-#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier) \
+ default:
+ return __cmpxchg_called_with_bad_pointer();
+ }
+}
+
+#define cmpxchg_local(ptr, old, new) \
+ ((__typeof__(*(ptr))) \
+ __cmpxchg((ptr), \
+ (unsigned long)(__typeof__(*(ptr)))(old), \
+ (unsigned long)(__typeof__(*(ptr)))(new), \
+ sizeof(*(ptr))))
+
+#define cmpxchg(ptr, old, new) \
({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __res = 0; \
+ __typeof__(*(ptr)) __res; \
\
- pre_barrier; \
- \
- switch (sizeof(*(__ptr))) { \
- case 4: \
- __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
- break; \
- case 8: \
- if (sizeof(long) == 8) { \
- __res = __cmpxchg_asm("lld", "scd", __ptr, \
- __old, __new); \
- break; \
- } \
- default: \
- __cmpxchg_called_with_bad_pointer(); \
- break; \
- } \
- \
- post_barrier; \
+ smp_mb__before_llsc(); \
+ __res = cmpxchg_local((ptr), (old), (new)); \
+ smp_llsc_mb(); \
\
__res; \
})
-#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, , )
-
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \
({ \
@@ -245,4 +205,6 @@ extern void __cmpxchg_called_with_bad_pointer(void);
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
+#undef __scbeqz
+
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 494d38274142..8baa9033b181 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -138,6 +138,9 @@
#ifndef cpu_has_mips16
#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
#endif
+#ifndef cpu_has_mips16e2
+#define cpu_has_mips16e2 (cpu_data[0].ases & MIPS_ASE_MIPS16E2)
+#endif
#ifndef cpu_has_mdmx
#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
#endif
@@ -487,6 +490,47 @@
# define cpu_has_perf (cpu_data[0].options & MIPS_CPU_PERF)
#endif
+#if defined(CONFIG_SMP) && defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
+/*
+ * Some systems share FTLB RAMs between threads within a core (siblings in
+ * kernel parlance). This means that FTLB entries may become invalid at almost
+ * any point when an entry is evicted due to a sibling thread writing an entry
+ * to the shared FTLB RAM.
+ *
+ * This is only relevant to SMP systems, and the only systems that exhibit this
+ * property implement MIPSr6 or higher so we constrain support for this to
+ * kernels that will run on such systems.
+ */
+# ifndef cpu_has_shared_ftlb_ram
+# define cpu_has_shared_ftlb_ram \
+ (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
+# endif
+
+/*
+ * Some systems take this a step further & share FTLB entries between siblings.
+ * This is implemented as TLB writes happening as usual, but if an entry
+ * written by a sibling exists in the shared FTLB for a translation which would
+ * otherwise cause a TLB refill exception then the CPU will use the entry
+ * written by its sibling rather than triggering a refill & writing a matching
+ * TLB entry for itself.
+ *
+ * This is naturally only valid if a TLB entry is known to be suitable for use
+ * on all siblings in a CPU, and so it only takes effect when MMIDs are in use
+ * rather than ASIDs or when a TLB entry is marked global.
+ */
+# ifndef cpu_has_shared_ftlb_entries
+# define cpu_has_shared_ftlb_entries \
+ (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
+# endif
+#endif /* SMP && __mips_isa_rev >= 6 */
+
+#ifndef cpu_has_shared_ftlb_ram
+# define cpu_has_shared_ftlb_ram 0
+#endif
+#ifndef cpu_has_shared_ftlb_entries
+# define cpu_has_shared_ftlb_entries 0
+#endif
+
/*
* Guest capabilities
*/
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index bdd6dc18e65c..175fe565f4e1 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -84,6 +84,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
case CPU_I6400:
+ case CPU_I6500:
case CPU_P6600:
#endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 98f59307e6a3..d0c152b989f8 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -124,6 +124,7 @@
#define PRID_IMP_P5600 0xa800
#define PRID_IMP_I6400 0xa900
#define PRID_IMP_M6250 0xab00
+#define PRID_IMP_I6500 0xb000
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -247,6 +248,7 @@
#define PRID_REV_LOONGSON3B_R1 0x0006
#define PRID_REV_LOONGSON3B_R2 0x0007
#define PRID_REV_LOONGSON3A_R2 0x0008
+#define PRID_REV_LOONGSON3A_R3 0x0009
/*
* Older processors used to encode processor version and revision in two
@@ -322,7 +324,7 @@ enum cpu_type_enum {
*/
CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
- CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
+ CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, CPU_I6500,
CPU_QEMU_GENERIC,
@@ -416,6 +418,10 @@ enum cpu_type_enum {
#define MIPS_CPU_GUESTID MBIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
#define MIPS_CPU_DRG MBIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
#define MIPS_CPU_UFR MBIT_ULL(53) /* CPU supports User mode FR switching */
+#define MIPS_CPU_SHARED_FTLB_RAM \
+ MBIT_ULL(54) /* CPU shares FTLB RAM with another */
+#define MIPS_CPU_SHARED_FTLB_ENTRIES \
+ MBIT_ULL(55) /* CPU shares FTLB entries with another */
/*
* CPU ASE encodings
@@ -430,5 +436,6 @@ enum cpu_type_enum {
#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
#define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
+#define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index ddd1c918103b..c5d351786416 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,7 +18,7 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
-#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
extern void *irq_stack[NR_CPUS];
diff --git a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
index ade0356df257..e6a8108cde4e 100644
--- a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
@@ -40,6 +40,7 @@
#endif
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
index c5b6eef0efa7..bace5b9ae4df 100644
--- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
@@ -31,6 +31,7 @@
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
index bc1167dbd4e3..b56cf10b91d3 100644
--- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
@@ -19,6 +19,7 @@
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
index 30c5cd9fd973..291fe90aafa5 100644
--- a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
@@ -37,6 +37,7 @@
#endif
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
index 21eae03d752a..2ec10237688c 100644
--- a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
@@ -27,6 +27,7 @@
#define cpu_has_mcheck 0
#define cpu_has_ejtag 0
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-generic/mc146818rtc.h b/arch/mips/include/asm/mach-generic/mc146818rtc.h
index 0b9a942f079d..9c72e540ff56 100644
--- a/arch/mips/include/asm/mach-generic/mc146818rtc.h
+++ b/arch/mips/include/asm/mach-generic/mc146818rtc.h
@@ -27,7 +27,7 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
outb_p(data, RTC_PORT(1));
}
-#define RTC_ALWAYS_BCD 1
+#define RTC_ALWAYS_BCD 0
#ifndef mc146818_decode_year
#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index 9b19b72dba56..b80d5eafc9db 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -19,6 +19,7 @@
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_divec 0
#define cpu_has_cache_cdex_p 1
#define cpu_has_prefetch 0
diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
index 7449794eade6..136d6d464e32 100644
--- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
@@ -43,6 +43,7 @@
#define cpu_has_ejtag 0
#define cpu_has_llsc 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
index 4cec06d133db..ba8b4e30b3e2 100644
--- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -16,6 +16,7 @@
*/
#define cpu_has_watch 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_divec 0
#define cpu_has_vce 0
#define cpu_has_cache_cdex_p 0
diff --git a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
index 241409b78ff1..63b4c889094b 100644
--- a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
@@ -29,6 +29,7 @@
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_vce 0
#define cpu_has_cache_cdex_s 0
#define cpu_has_mcheck 0
diff --git a/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h b/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h
index 0933f94a1e69..7c5e576f9d96 100644
--- a/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h
@@ -23,6 +23,7 @@
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
index d3f3258b7cd4..9f9bb9c53785 100644
--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
@@ -27,12 +27,22 @@ struct efi_memory_map_loongson {
} __packed;
enum loongson_cpu_type {
- Loongson_2E = 0,
- Loongson_2F = 1,
- Loongson_3A = 2,
- Loongson_3B = 3,
- Loongson_1A = 4,
- Loongson_1B = 5
+ Legacy_2E = 0x0,
+ Legacy_2F = 0x1,
+ Legacy_3A = 0x2,
+ Legacy_3B = 0x3,
+ Legacy_1A = 0x4,
+ Legacy_1B = 0x5,
+ Legacy_2G = 0x6,
+ Legacy_2H = 0x7,
+ Loongson_1A = 0x100,
+ Loongson_1B = 0x101,
+ Loongson_2E = 0x200,
+ Loongson_2F = 0x201,
+ Loongson_2G = 0x202,
+ Loongson_2H = 0x203,
+ Loongson_3A = 0x300,
+ Loongson_3B = 0x301
};
/*
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 89328a3d44d8..581915ce231c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -32,6 +32,7 @@
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mips3d 0
#define cpu_has_mipsmt 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
index 091deb1700e5..0c29ff820bb9 100644
--- a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
@@ -13,6 +13,7 @@
#define cpu_has_4k_cache 1
#define cpu_has_watch 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_counter 1
#define cpu_has_divec 1
#define cpu_has_vce 0
diff --git a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
index b15307597ee3..6a1087ee8c6e 100644
--- a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
@@ -48,6 +48,7 @@
#define cpu_has_llsc 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
index d38be668e338..e1e182300fea 100644
--- a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
@@ -17,6 +17,7 @@
#define cpu_has_counter 1
#define cpu_has_watch 0
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_divec 0
#define cpu_has_cache_cdex_p 1
#define cpu_has_prefetch 0
diff --git a/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
index 92927b62b5a0..7022358057fd 100644
--- a/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
@@ -13,6 +13,7 @@
*/
#define cpu_has_watch 1
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_divec 1
#define cpu_has_vce 0
#define cpu_has_cache_cdex_p 0
diff --git a/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
index 7f5144c6ce2d..b9d39dc45420 100644
--- a/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
@@ -6,6 +6,7 @@
#define cpu_has_inclusive_pcaches 0
#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
index 6b444cd9526f..ecb6c7335484 100644
--- a/arch/mips/include/asm/machine.h
+++ b/arch/mips/include/asm/machine.h
@@ -60,4 +60,35 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
return NULL;
}
+/**
+ * struct mips_fdt_fixup - Describe a fixup to apply to an FDT
+ * @apply: applies the fixup to @fdt, returns zero on success else -errno
+ * @description: a short description of the fixup
+ *
+ * Describes a fixup applied to an FDT blob by the @apply function. The
+ * @description field provides a short description of the fixup intended for
+ * use in error messages if the @apply function returns non-zero.
+ */
+struct mips_fdt_fixup {
+ int (*apply)(void *fdt);
+ const char *description;
+};
+
+/**
+ * apply_mips_fdt_fixups() - apply fixups to an FDT blob
+ * @fdt_out: buffer in which to place the fixed-up FDT
+ * @fdt_out_size: the size of the @fdt_out buffer
+ * @fdt_in: the FDT blob
+ * @fixups: pointer to an array of fixups to be applied
+ *
+ * Loop through the array of fixups pointed to by @fixups, calling the apply
+ * function on each until either one returns an error or we reach the end of
+ * the list as indicated by an entry with a NULL apply field.
+ *
+ * Return: zero on success, else -errno
+ */
+extern int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
+ const void *fdt_in,
+ const struct mips_fdt_fixup *fixups);
+
#endif /* __MIPS_ASM_MACHINE_H__ */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 6875b69f59f7..dbb0eceda2c6 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -652,6 +652,7 @@
#define MIPS_CONF5_SBRI (_ULCAST_(1) << 6)
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
+#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 702c273e67a9..e51add184717 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -47,8 +47,8 @@ typedef struct {
#define Elf_Mips_Rel Elf32_Rel
#define Elf_Mips_Rela Elf32_Rela
-#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM(rel.r_info)
-#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE(rel.r_info)
+#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM((rel).r_info)
+#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE((rel).r_info)
#endif
@@ -65,8 +65,8 @@ typedef struct {
#define Elf_Mips_Rel Elf64_Mips_Rel
#define Elf_Mips_Rela Elf64_Mips_Rela
-#define ELF_MIPS_R_SYM(rel) (rel.r_sym)
-#define ELF_MIPS_R_TYPE(rel) (rel.r_type)
+#define ELF_MIPS_R_SYM(rel) ((rel).r_sym)
+#define ELF_MIPS_R_TYPE(rel) ((rel).r_type)
#endif
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a1bdb1ea5234..39b9f311c4ef 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -116,7 +116,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
- pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 98a117a05fbc..bab3d41e5987 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -47,7 +47,7 @@ extern int __cpu_logical_map[NR_CPUS];
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
-extern void asmlinkage smp_bootstrap(void);
+extern asmlinkage void smp_bootstrap(void);
extern void calculate_cpu_foreign_map(void);
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index a8df44d60607..a7d21da16b6a 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -9,431 +9,9 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
-#include <linux/compiler.h>
-
-#include <asm/barrier.h>
#include <asm/processor.h>
-#include <asm/compiler.h>
-#include <asm/war.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- *
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * These are fair FIFO ticket locks
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-
-
-/*
- * Ticket locks are conceptually two parts, one indicating the current head of
- * the queue, and the other indicating the current tail. The lock is acquired
- * by atomically noting the tail and incrementing it by one (thus adding
- * ourselves to the queue and noting our position), then waiting until the head
- * becomes equal to the initial value of the tail.
- */
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- u32 counters = ACCESS_ONCE(lock->lock);
-
- return ((counters >> 16) ^ counters) & 0xffff;
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.h.serving_now == lock.h.ticket;
-}
-
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- u16 owner = READ_ONCE(lock->h.serving_now);
- smp_rmb();
- for (;;) {
- arch_spinlock_t tmp = READ_ONCE(*lock);
-
- if (tmp.h.serving_now == tmp.h.ticket ||
- tmp.h.serving_now != owner)
- break;
-
- cpu_relax();
- }
- smp_acquire__after_ctrl_dep();
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- u32 counters = ACCESS_ONCE(lock->lock);
-
- return (((counters >> 16) - counters) & 0xffff) > 1;
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- int my_ticket;
- int tmp;
- int inc = 0x10000;
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__ (
- " .set push # arch_spin_lock \n"
- " .set noreorder \n"
- " \n"
- "1: ll %[ticket], %[ticket_ptr] \n"
- " addu %[my_ticket], %[ticket], %[inc] \n"
- " sc %[my_ticket], %[ticket_ptr] \n"
- " beqzl %[my_ticket], 1b \n"
- " nop \n"
- " srl %[my_ticket], %[ticket], 16 \n"
- " andi %[ticket], %[ticket], 0xffff \n"
- " bne %[ticket], %[my_ticket], 4f \n"
- " subu %[ticket], %[my_ticket], %[ticket] \n"
- "2: \n"
- " .subsection 2 \n"
- "4: andi %[ticket], %[ticket], 0xffff \n"
- " sll %[ticket], 5 \n"
- " \n"
- "6: bnez %[ticket], 6b \n"
- " subu %[ticket], 1 \n"
- " \n"
- " lhu %[ticket], %[serving_now_ptr] \n"
- " beq %[ticket], %[my_ticket], 2b \n"
- " subu %[ticket], %[my_ticket], %[ticket] \n"
- " b 4b \n"
- " subu %[ticket], %[ticket], 1 \n"
- " .previous \n"
- " .set pop \n"
- : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
- [serving_now_ptr] "+m" (lock->h.serving_now),
- [ticket] "=&r" (tmp),
- [my_ticket] "=&r" (my_ticket)
- : [inc] "r" (inc));
- } else {
- __asm__ __volatile__ (
- " .set push # arch_spin_lock \n"
- " .set noreorder \n"
- " \n"
- "1: ll %[ticket], %[ticket_ptr] \n"
- " addu %[my_ticket], %[ticket], %[inc] \n"
- " sc %[my_ticket], %[ticket_ptr] \n"
- " beqz %[my_ticket], 1b \n"
- " srl %[my_ticket], %[ticket], 16 \n"
- " andi %[ticket], %[ticket], 0xffff \n"
- " bne %[ticket], %[my_ticket], 4f \n"
- " subu %[ticket], %[my_ticket], %[ticket] \n"
- "2: .insn \n"
- " .subsection 2 \n"
- "4: andi %[ticket], %[ticket], 0xffff \n"
- " sll %[ticket], 5 \n"
- " \n"
- "6: bnez %[ticket], 6b \n"
- " subu %[ticket], 1 \n"
- " \n"
- " lhu %[ticket], %[serving_now_ptr] \n"
- " beq %[ticket], %[my_ticket], 2b \n"
- " subu %[ticket], %[my_ticket], %[ticket] \n"
- " b 4b \n"
- " subu %[ticket], %[ticket], 1 \n"
- " .previous \n"
- " .set pop \n"
- : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
- [serving_now_ptr] "+m" (lock->h.serving_now),
- [ticket] "=&r" (tmp),
- [my_ticket] "=&r" (my_ticket)
- : [inc] "r" (inc));
- }
-
- smp_llsc_mb();
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- unsigned int serving_now = lock->h.serving_now + 1;
- wmb();
- lock->h.serving_now = (u16)serving_now;
- nudge_writes();
-}
-
-static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
-{
- int tmp, tmp2, tmp3;
- int inc = 0x10000;
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__ (
- " .set push # arch_spin_trylock \n"
- " .set noreorder \n"
- " \n"
- "1: ll %[ticket], %[ticket_ptr] \n"
- " srl %[my_ticket], %[ticket], 16 \n"
- " andi %[now_serving], %[ticket], 0xffff \n"
- " bne %[my_ticket], %[now_serving], 3f \n"
- " addu %[ticket], %[ticket], %[inc] \n"
- " sc %[ticket], %[ticket_ptr] \n"
- " beqzl %[ticket], 1b \n"
- " li %[ticket], 1 \n"
- "2: \n"
- " .subsection 2 \n"
- "3: b 2b \n"
- " li %[ticket], 0 \n"
- " .previous \n"
- " .set pop \n"
- : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
- [ticket] "=&r" (tmp),
- [my_ticket] "=&r" (tmp2),
- [now_serving] "=&r" (tmp3)
- : [inc] "r" (inc));
- } else {
- __asm__ __volatile__ (
- " .set push # arch_spin_trylock \n"
- " .set noreorder \n"
- " \n"
- "1: ll %[ticket], %[ticket_ptr] \n"
- " srl %[my_ticket], %[ticket], 16 \n"
- " andi %[now_serving], %[ticket], 0xffff \n"
- " bne %[my_ticket], %[now_serving], 3f \n"
- " addu %[ticket], %[ticket], %[inc] \n"
- " sc %[ticket], %[ticket_ptr] \n"
- " beqz %[ticket], 1b \n"
- " li %[ticket], 1 \n"
- "2: .insn \n"
- " .subsection 2 \n"
- "3: b 2b \n"
- " li %[ticket], 0 \n"
- " .previous \n"
- " .set pop \n"
- : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
- [ticket] "=&r" (tmp),
- [my_ticket] "=&r" (tmp2),
- [now_serving] "=&r" (tmp3)
- : [inc] "r" (inc));
- }
-
- smp_llsc_mb();
-
- return tmp;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts but no interrupt
- * writers. For those circumstances we can "mix" irq-safe locks - any writer
- * needs to get an irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
-
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(rw) ((rw)->lock >= 0)
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set noreorder # arch_read_lock \n"
- "1: ll %1, %2 \n"
- " bltz %1, 1b \n"
- " addu %1, 1 \n"
- " sc %1, %0 \n"
- " beqzl %1, 1b \n"
- " nop \n"
- " .set reorder \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } else {
- do {
- __asm__ __volatile__(
- "1: ll %1, %2 # arch_read_lock \n"
- " bltz %1, 1b \n"
- " addu %1, 1 \n"
- "2: sc %1, %0 \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } while (unlikely(!tmp));
- }
-
- smp_llsc_mb();
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- smp_mb__before_llsc();
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__(
- "1: ll %1, %2 # arch_read_unlock \n"
- " addiu %1, -1 \n"
- " sc %1, %0 \n"
- " beqzl %1, 1b \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } else {
- do {
- __asm__ __volatile__(
- "1: ll %1, %2 # arch_read_unlock \n"
- " addiu %1, -1 \n"
- " sc %1, %0 \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } while (unlikely(!tmp));
- }
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set noreorder # arch_write_lock \n"
- "1: ll %1, %2 \n"
- " bnez %1, 1b \n"
- " lui %1, 0x8000 \n"
- " sc %1, %0 \n"
- " beqzl %1, 1b \n"
- " nop \n"
- " .set reorder \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } else {
- do {
- __asm__ __volatile__(
- "1: ll %1, %2 # arch_write_lock \n"
- " bnez %1, 1b \n"
- " lui %1, 0x8000 \n"
- "2: sc %1, %0 \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } while (unlikely(!tmp));
- }
-
- smp_llsc_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- smp_mb__before_llsc();
-
- __asm__ __volatile__(
- " # arch_write_unlock \n"
- " sw $0, %0 \n"
- : "=m" (rw->lock)
- : "m" (rw->lock)
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
- int ret;
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set noreorder # arch_read_trylock \n"
- " li %2, 0 \n"
- "1: ll %1, %3 \n"
- " bltz %1, 2f \n"
- " addu %1, 1 \n"
- " sc %1, %0 \n"
- " .set reorder \n"
- " beqzl %1, 1b \n"
- " nop \n"
- __WEAK_LLSC_MB
- " li %2, 1 \n"
- "2: \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } else {
- __asm__ __volatile__(
- " .set noreorder # arch_read_trylock \n"
- " li %2, 0 \n"
- "1: ll %1, %3 \n"
- " bltz %1, 2f \n"
- " addu %1, 1 \n"
- " sc %1, %0 \n"
- " beqz %1, 1b \n"
- " nop \n"
- " .set reorder \n"
- __WEAK_LLSC_MB
- " li %2, 1 \n"
- "2: .insn \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- }
-
- return ret;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
- int ret;
-
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set noreorder # arch_write_trylock \n"
- " li %2, 0 \n"
- "1: ll %1, %3 \n"
- " bnez %1, 2f \n"
- " lui %1, 0x8000 \n"
- " sc %1, %0 \n"
- " beqzl %1, 1b \n"
- " nop \n"
- __WEAK_LLSC_MB
- " li %2, 1 \n"
- " .set reorder \n"
- "2: \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } else {
- do {
- __asm__ __volatile__(
- " ll %1, %3 # arch_write_trylock \n"
- " li %2, 0 \n"
- " bnez %1, 2f \n"
- " lui %1, 0x8000 \n"
- " sc %1, %0 \n"
- " li %2, 1 \n"
- "2: .insn \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
- "=&r" (ret)
- : GCC_OFF_SMALL_ASM() (rw->lock)
- : "memory");
- } while (unlikely(!tmp));
-
- smp_llsc_mb();
- }
-
- return ret;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index 9b2528e612c0..177e722eb96c 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -1,37 +1,7 @@
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-#include <linux/types.h>
-
-#include <asm/byteorder.h>
-
-typedef union {
- /*
- * bits 0..15 : serving_now
- * bits 16..31 : ticket
- */
- u32 lock;
- struct {
-#ifdef __BIG_ENDIAN
- u16 ticket;
- u16 serving_now;
-#else
- u16 serving_now;
- u16 ticket;
-#endif
- } h;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0 }
-
-typedef struct {
- volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
#endif
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index d87882513ee3..7c713025b23f 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -85,7 +85,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
{
if (error) {
regs->regs[2] = -error;
- regs->regs[7] = -1;
+ regs->regs[7] = 1;
} else {
regs->regs[2] = val;
regs->regs[7] = 0;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 9700251159b1..b71306947290 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -497,283 +497,6 @@ do { \
extern void __put_user_unknown(void);
/*
- * ul{b,h,w} are macros and there are no equivalent macros for EVA.
- * EVA unaligned access is handled in the ADE exception handler.
- */
-#ifndef CONFIG_EVA
-/*
- * put_user_unaligned: - Write a simple value into user space.
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user_unaligned(x,ptr) \
- __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * get_user_unaligned: - Get a simple variable from user space.
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user_unaligned(x,ptr) \
- __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user_unaligned(x,ptr) \
- __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user_unaligned(x,ptr) \
- __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr) \
- __get_user_unaligned_asm_ll32(val, ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr) \
- __get_user_unaligned_asm(val, "uld", ptr)
-#endif
-
-extern void __get_user_unaligned_unknown(void);
-
-#define __get_user_unaligned_common(val, size, ptr) \
-do { \
- switch (size) { \
- case 1: __get_data_asm(val, "lb", ptr); break; \
- case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
- case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
- case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
- default: __get_user_unaligned_unknown(); break; \
- } \
-} while (0)
-
-#define __get_user_unaligned_nocheck(x,ptr,size) \
-({ \
- int __gu_err; \
- \
- __get_user_unaligned_common((x), size, ptr); \
- __gu_err; \
-})
-
-#define __get_user_unaligned_check(x,ptr,size) \
-({ \
- int __gu_err = -EFAULT; \
- const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
- \
- if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
- __get_user_unaligned_common((x), size, __gu_ptr); \
- \
- __gu_err; \
-})
-
-#define __get_data_unaligned_asm(val, insn, addr) \
-{ \
- long __gu_tmp; \
- \
- __asm__ __volatile__( \
- "1: " insn " %1, %3 \n" \
- "2: \n" \
- " .insn \n" \
- " .section .fixup,\"ax\" \n" \
- "3: li %0, %4 \n" \
- " move %1, $0 \n" \
- " j 2b \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " "__UA_ADDR "\t1b, 3b \n" \
- " "__UA_ADDR "\t1b + 4, 3b \n" \
- " .previous \n" \
- : "=r" (__gu_err), "=r" (__gu_tmp) \
- : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
- \
- (val) = (__typeof__(*(addr))) __gu_tmp; \
-}
-
-/*
- * Get a long long 64 using 32 bit registers.
- */
-#define __get_user_unaligned_asm_ll32(val, addr) \
-{ \
- unsigned long long __gu_tmp; \
- \
- __asm__ __volatile__( \
- "1: ulw %1, (%3) \n" \
- "2: ulw %D1, 4(%3) \n" \
- " move %0, $0 \n" \
- "3: \n" \
- " .insn \n" \
- " .section .fixup,\"ax\" \n" \
- "4: li %0, %4 \n" \
- " move %1, $0 \n" \
- " move %D1, $0 \n" \
- " j 3b \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " " __UA_ADDR " 1b, 4b \n" \
- " " __UA_ADDR " 1b + 4, 4b \n" \
- " " __UA_ADDR " 2b, 4b \n" \
- " " __UA_ADDR " 2b + 4, 4b \n" \
- " .previous \n" \
- : "=r" (__gu_err), "=&r" (__gu_tmp) \
- : "0" (0), "r" (addr), "i" (-EFAULT)); \
- (val) = (__typeof__(*(addr))) __gu_tmp; \
-}
-
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
-#endif
-
-#define __put_user_unaligned_common(ptr, size) \
-do { \
- switch (size) { \
- case 1: __put_data_asm("sb", ptr); break; \
- case 2: __put_user_unaligned_asm("ush", ptr); break; \
- case 4: __put_user_unaligned_asm("usw", ptr); break; \
- case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
- default: __put_user_unaligned_unknown(); break; \
-	} \
-} while (0)
-
-#define __put_user_unaligned_nocheck(x,ptr,size) \
-({ \
- __typeof__(*(ptr)) __pu_val; \
- int __pu_err = 0; \
- \
- __pu_val = (x); \
- __put_user_unaligned_common(ptr, size); \
- __pu_err; \
-})
-
-#define __put_user_unaligned_check(x,ptr,size) \
-({ \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- int __pu_err = -EFAULT; \
- \
- if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
- __put_user_unaligned_common(__pu_addr, size); \
- \
- __pu_err; \
-})
-
-#define __put_user_unaligned_asm(insn, ptr) \
-{ \
- __asm__ __volatile__( \
- "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
- "2: \n" \
- " .insn \n" \
- " .section .fixup,\"ax\" \n" \
- "3: li %0, %4 \n" \
- " j 2b \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " " __UA_ADDR " 1b, 3b \n" \
- " .previous \n" \
- : "=r" (__pu_err) \
- : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
- "i" (-EFAULT)); \
-}
-
-#define __put_user_unaligned_asm_ll32(ptr) \
-{ \
- __asm__ __volatile__( \
- "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
- "2: sw %D2, 4(%3) \n" \
- "3: \n" \
- " .insn \n" \
- " .section .fixup,\"ax\" \n" \
- "4: li %0, %4 \n" \
- " j 3b \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " " __UA_ADDR " 1b, 4b \n" \
- " " __UA_ADDR " 1b + 4, 4b \n" \
- " " __UA_ADDR " 2b, 4b \n" \
- " " __UA_ADDR " 2b + 4, 4b \n" \
- " .previous" \
- : "=r" (__pu_err) \
- : "0" (0), "r" (__pu_val), "r" (ptr), \
- "i" (-EFAULT)); \
-}
-
-extern void __put_user_unaligned_unknown(void);
-#endif
-
-/*
* We're generating jump to subroutines which will be outside the range of
* jump instructions
*/
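With the unaligned user-access macros removed above, a caller that genuinely needs an unaligned access can go through copy_from_user(), which makes no alignment assumptions about the source pointer. A minimal hedged sketch (the helper name is hypothetical, not part of the patch):

    /* Sketch: fetch a possibly-unaligned 32-bit value from user space
     * without get_user_unaligned(). Hypothetical helper for
     * illustration only.
     */
    static int fetch_unaligned_u32(u32 *dst, const void __user *src)
    {
            if (copy_from_user(dst, src, sizeof(*dst)))
                    return -EFAULT;
            return 0;
    }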
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 3748f4d120a5..59dae37f6b8d 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -72,9 +72,12 @@ Ip_u1u2s3(_beq);
Ip_u1u2s3(_beql);
Ip_u1s2(_bgez);
Ip_u1s2(_bgezl);
+Ip_u1s2(_bgtz);
+Ip_u1s2(_blez);
Ip_u1s2(_bltz);
Ip_u1s2(_bltzl);
Ip_u1u2s3(_bne);
+Ip_u1(_break);
Ip_u2s3u1(_cache);
Ip_u1u2(_cfc1);
Ip_u2u1(_cfcmsa);
@@ -82,19 +85,28 @@ Ip_u1u2(_ctc1);
Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
+Ip_u1u2(_ddivu);
Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
+Ip_u2u1msbu3(_dinsu);
Ip_u1u2(_divu);
Ip_u1u2u3(_dmfc0);
Ip_u1u2u3(_dmtc0);
+Ip_u1u2(_dmultu);
Ip_u2u1u3(_drotr);
Ip_u2u1u3(_drotr32);
+Ip_u2u1(_dsbh);
+Ip_u2u1(_dshd);
Ip_u2u1u3(_dsll);
Ip_u2u1u3(_dsll32);
+Ip_u3u2u1(_dsllv);
Ip_u2u1u3(_dsra);
+Ip_u2u1u3(_dsra32);
+Ip_u3u2u1(_dsrav);
Ip_u2u1u3(_dsrl);
Ip_u2u1u3(_dsrl32);
+Ip_u3u2u1(_dsrlv);
Ip_u3u1u2(_dsubu);
Ip_0(_eret);
Ip_u2u1msbu3(_ext);
@@ -104,6 +116,7 @@ Ip_u1(_jal);
Ip_u2u1(_jalr);
Ip_u1(_jr);
Ip_u2s3u1(_lb);
+Ip_u2s3u1(_lbu);
Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx);
Ip_u2s3u1(_lh);
@@ -112,27 +125,35 @@ Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
Ip_u2s3u1(_lw);
+Ip_u2s3u1(_lwu);
Ip_u3u1u2(_lwx);
Ip_u1u2u3(_mfc0);
Ip_u1u2u3(_mfhc0);
Ip_u1(_mfhi);
Ip_u1(_mflo);
+Ip_u3u1u2(_movn);
+Ip_u3u1u2(_movz);
Ip_u1u2u3(_mtc0);
Ip_u1u2u3(_mthc0);
Ip_u1(_mthi);
Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
+Ip_u1u2(_multu);
+Ip_u3u1u2(_nor);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
Ip_u2s3u1(_pref);
Ip_0(_rfe);
Ip_u2u1u3(_rotr);
+Ip_u2s3u1(_sb);
Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd);
+Ip_u2s3u1(_sh);
Ip_u2u1u3(_sll);
Ip_u3u2u1(_sllv);
Ip_s3s1s2(_slt);
+Ip_u2u1s3(_slti);
Ip_u2u1s3(_sltiu);
Ip_u3u1u2(_sltu);
Ip_u2u1u3(_sra);
@@ -248,6 +269,15 @@ static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
uasm_i_dsrl32(p, a1, a2, a3 - 32);
}
+static inline void uasm_i_dsra_safe(u32 **p, unsigned int a1,
+ unsigned int a2, unsigned int a3)
+{
+ if (a3 < 32)
+ uasm_i_dsra(p, a1, a2, a3);
+ else
+ uasm_i_dsra32(p, a1, a2, a3 - 32);
+}
+
/* Handle relocations. */
struct uasm_reloc {
u32 *addr;
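uasm_i_dsra_safe() exists because the doubleword shift instructions encode only a 5-bit shift amount, so shifts of 32-63 must be expressed as the *32 form with (shift - 32). The same split in plain C, as a hedged illustration rather than uasm output (assumes arithmetic >> on signed values, as GCC/Clang provide):

    #include <stdint.h>

    /* Sketch: what the emitted dsra/dsra32 pair computes. */
    static int64_t dsra_safe(int64_t val, unsigned int shift)
    {
            if (shift < 32)
                    return val >> shift;                /* dsra   */
            return (val >> 32) >> (shift - 32);         /* dsra32 */
    }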
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index 8f4ca5dd992b..b7cd6cf77b83 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -79,8 +79,8 @@ union mips_vdso_data {
struct {
u64 xtime_sec;
u64 xtime_nsec;
- u32 wall_to_mono_sec;
- u32 wall_to_mono_nsec;
+ u64 wall_to_mono_sec;
+ u64 wall_to_mono_nsec;
u32 seq_count;
u32 cs_shift;
u8 clock_mode;
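Widening wall_to_mono_{sec,nsec} to u64 keeps the vDSO fields in step with the 64-bit types the generic timekeeping code uses for wall_to_monotonic. A hedged sketch of the CLOCK_MONOTONIC derivation these fields feed, with the clocksource shift omitted for brevity:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Sketch: mono = xtime + wall_to_mono, with nanosecond carry.
     * Negative offsets stored as u64 wrap correctly under modular
     * addition.
     */
    static void mono_time(uint64_t xt_sec, uint64_t xt_nsec,
                          uint64_t wtm_sec, uint64_t wtm_nsec,
                          uint64_t *sec, uint64_t *nsec)
    {
            *sec = xt_sec + wtm_sec;
            *nsec = xt_nsec + wtm_nsec;
            if (*nsec >= NSEC_PER_SEC) {
                    *nsec -= NSEC_PER_SEC;
                    (*sec)++;
            }
    }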
diff --git a/arch/mips/include/asm/yamon-dt.h b/arch/mips/include/asm/yamon-dt.h
new file mode 100644
index 000000000000..485cfe3e45e1
--- /dev/null
+++ b/arch/mips/include/asm/yamon-dt.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_YAMON_DT_H__
+#define __MIPS_ASM_YAMON_DT_H__
+
+#include <linux/types.h>
+
+/**
+ * struct yamon_mem_region - Represents a contiguous range of physical RAM.
+ * @start: Start physical address.
+ * @size: Maximum size of region.
+ * @discard: Length of additional memory to discard after the region.
+ */
+struct yamon_mem_region {
+ phys_addr_t start;
+ phys_addr_t size;
+ phys_addr_t discard;
+};
+
+/**
+ * yamon_dt_append_cmdline() - Append YAMON-provided command line to /chosen
+ * @fdt: the FDT blob
+ *
+ * Write the YAMON-provided command line to the bootargs property of the
+ * /chosen node in @fdt.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_append_cmdline(void *fdt);
+
+/**
+ * yamon_dt_append_memory() - Append YAMON-provided memory info to /memory
+ * @fdt: the FDT blob
+ * @regions: array of physical memory regions, terminated by a zero-size entry
+ *
+ * Generate a /memory node in @fdt based upon memory size information provided
+ * by YAMON in its environment and the @regions array.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_append_memory(void *fdt,
+ const struct yamon_mem_region *regions);
+
+/**
+ * yamon_dt_serial_config() - Append YAMON-provided serial config to /chosen
+ * @fdt: the FDT blob
+ *
+ * Generate a stdout-path property in the /chosen node of @fdt, based upon
+ * information provided in the YAMON environment about the UART configuration
+ * of the system.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_serial_config(void *fdt);
+
+#endif /* __MIPS_ASM_YAMON_DT_H__ */
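Callers hand yamon_dt_append_memory() a table whose last entry has size zero; a hedged example of such a table (addresses invented for illustration):

    /* Sketch: a region table as consumed by yamon_dt_append_memory().
     * The zero-size entry terminates the array; addresses are made up.
     */
    static const struct yamon_mem_region mem_regions[] = {
            {
                    .start   = 0,
                    .size    = 0x10000000,  /* RAM below an I/O hole */
                    .discard = 0x10000000,  /* skip the hole itself  */
            },
            {
                    .start   = 0x20000000,
                    .size    = 0xe0000000,  /* remaining RAM */
            },
            { /* zero size terminates */ }
    };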
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index b5e46ae872d3..d61897535926 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -276,12 +276,19 @@ enum lx_func {
*/
enum bshfl_func {
wsbh_op = 0x2,
- dshd_op = 0x5,
seb_op = 0x10,
seh_op = 0x18,
};
/*
+ * DBSHFL opcodes
+ */
+enum dbshfl_func {
+ dsbh_op = 0x2,
+ dshd_op = 0x5,
+};
+
+/*
* MSA minor opcodes.
*/
enum msa_func {
@@ -755,6 +762,16 @@ struct msa_mi10_format { /* MSA MI10 */
;))))))
};
+struct dsp_format { /* SPEC3 DSP format instructions */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int op : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
struct spec3_format { /* SPEC3 */
__BITFIELD_FIELD(unsigned int opcode:6,
__BITFIELD_FIELD(unsigned int rs:5,
@@ -1046,6 +1063,7 @@ union mips_instruction {
struct b_format b_format;
struct ps_format ps_format;
struct v_format v_format;
+ struct dsp_format dsp_format;
struct spec3_format spec3_format;
struct fb_format fb_format;
struct fp0_format fp0_format;
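The new dsp_format view gives decoders named access to the base/index/rd/op/func fields of SPEC3 DSP instructions. A hedged sketch of the LWX test the unaligned-access emulator performs with it, using the opcode constants defined in this header:

    /* Sketch: recognise an LWX (load word indexed) instruction via
     * the dsp_format view added above.
     */
    static bool is_lwx(union mips_instruction insn)
    {
            return insn.dsp_format.opcode == spec3_op &&
                   insn.dsp_format.func == lx_op &&
                   insn.dsp_format.op == lwx_op;
    }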
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9a0e37b92ce0..46c0581256f1 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,7 +4,7 @@
extra-y := head.o vmlinux.lds
-obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+obj-y += cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
process.o prom.o ptrace.o reset.o setup.o signal.o \
syscall.o time.o topology.o traps.o unaligned.o watch.o \
vdso.o cacheinfo.o
@@ -31,7 +31,6 @@ obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_DEBUG_FS) += segment.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index f702a459a830..b79ed9af9886 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
*
* @regs: Pointer to pt_regs
* @insn: branch instruction to decode
- * @returns: -EFAULT on error and forces SIGBUS, and on success
+ * @returns: -EFAULT on error and forces SIGILL, and on success
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*
@@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/* Fall through */
case jr_op:
if (NO_R6EMU && insn.r_format.func == jr_op)
- goto sigill_r6;
+ goto sigill_r2r6;
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
@@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
switch (insn.i_format.rt) {
case bltzl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bltzall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bgezall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* These are unconditional and in j_format.
*/
+ case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
case j_op:
@@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
*/
case beql_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bnel_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case blez_op:
/*
* Compact branches for R6 for the
@@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgtz_op:
/*
* Compact branches for R6 for the
@@ -774,35 +771,27 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
#else
case bc6_op:
/* Only valid for MIPS R6 */
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
regs->cp0_epc += 8;
break;
case balc6_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BALC */
regs->regs[31] = epc + 4;
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
case pop66_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
case pop76_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BNEZC || JIALC */
if (!insn.i_format.rs) {
/* JIALC: set $31/ra */
@@ -814,10 +803,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case pop10_op:
case pop30_op:
/* Only valid for MIPS R6 */
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/*
* Compact branches:
 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
@@ -831,12 +818,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
return ret;
sigill_dsp:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
- force_sig(SIGBUS, current);
+ pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
+ return -EFAULT;
+sigill_r2r6:
+ pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
return -EFAULT;
sigill_r6:
- pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
- current->comm);
+ pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
+ current->comm);
force_sig(SIGILL, current);
return -EFAULT;
}
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
new file mode 100644
index 000000000000..7730f1d3434f
--- /dev/null
+++ b/arch/mips/kernel/cmpxchg.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <asm/cmpxchg.h>
+
+unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
+{
+ u32 old32, new32, load32, mask;
+ volatile u32 *ptr32;
+ unsigned int shift;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask value to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ val &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * exchange within the naturally aligned 4 byte integer that includes
+ * it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ do {
+ old32 = load32;
+ new32 = (load32 & ~mask) | (val << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ } while (load32 != old32);
+
+ return (load32 & mask) >> shift;
+}
+
+unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ u32 mask, old32, new32, load32;
+ volatile u32 *ptr32;
+ unsigned int shift;
+ u8 load;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask inputs to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ old &= mask;
+ new &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * compare & exchange within the naturally aligned 4 byte integer
+ * that includes it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ while (true) {
+ /*
+ * Ensure the byte we want to exchange matches the expected
+ * old value, and if not then bail.
+ */
+ load = (load32 & mask) >> shift;
+ if (load != old)
+ return load;
+
+ /*
+ * Calculate the old & new values of the naturally aligned
+ * 4 byte integer that include the byte we want to exchange.
+ * Attempt to exchange the old value for the new value, and
+ * return if we succeed.
+ */
+ old32 = (load32 & ~mask) | (old << shift);
+ new32 = (load32 & ~mask) | (new << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ if (load32 == old32)
+ return old;
+ }
+}
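The same byte-in-word technique can be exercised stand-alone. A hedged user-space sketch with GCC/Clang's __atomic_compare_exchange_n standing in for the kernel's cmpxchg(), and little-endian layout assumed for brevity:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of __xchg_small() for a byte: CAS on the containing
     * aligned 32-bit word until the byte swap sticks.
     */
    static uint8_t xchg_u8(uint8_t *ptr, uint8_t val)
    {
            uint32_t *ptr32 = (uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
            unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
            uint32_t mask = (uint32_t)0xff << shift;
            uint32_t old32 = *ptr32, new32;

            do {
                    new32 = (old32 & ~mask) | ((uint32_t)val << shift);
            } while (!__atomic_compare_exchange_n(ptr32, &old32, new32, 0,
                                                  __ATOMIC_SEQ_CST,
                                                  __ATOMIC_SEQ_CST));
            return (old32 >> shift) & 0xff;
    }

    int main(void)
    {
            _Alignas(uint32_t) uint8_t buf[4] = { 1, 2, 3, 4 };

            printf("old=%u new=%u\n", xchg_u8(&buf[2], 9), buf[2]);
            return 0;
    }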
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index a00e87b0256d..b849fe6aad94 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -22,6 +22,7 @@
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_ID_OFS 0x2028
+#define CPC_CL_VC_STOP_OFS 0x2020
#define CPC_CL_VC_RUN_OFS 0x2028
.extern mips_cm_base
@@ -376,8 +377,12 @@ LEAF(mips_cps_boot_vpes)
PTR_LI t2, UNCAC_BASE
PTR_ADD t1, t1, t2
- /* Set VC_RUN to the VPE mask */
+ /* Start any other VPs that ought to be running */
PTR_S ta2, CPC_CL_VC_RUN_OFS(t1)
+
+ /* Ensure this VP stops running if it shouldn't be */
+ not ta2
+ PTR_S ta2, CPC_CL_VC_STOP_OFS(t1)
ehb
#elif defined(CONFIG_MIPS_MT)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 1aba27786bd5..d08afc7dc507 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -564,6 +564,7 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
back_to_back_c0_hazard();
break;
case CPU_I6400:
+ case CPU_I6500:
/* There's no way to disable the FTLB */
if (!(flags & FTLB_EN))
return 1;
@@ -844,6 +845,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_MVH;
if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
c->options |= MIPS_CPU_VP;
+ if (config5 & MIPS_CONF5_CA2)
+ c->ases |= MIPS_ASE_MIPS16E2;
return config5 & MIPS_CONF_M;
}
@@ -1635,6 +1638,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_I6400;
__cpu_name[cpu] = "MIPS I6400";
break;
+ case PRID_IMP_I6500:
+ c->cputype = CPU_I6500;
+ __cpu_name[cpu] = "MIPS I6500";
+ break;
case PRID_IMP_M5150:
c->cputype = CPU_M5150;
__cpu_name[cpu] = "MIPS M5150";
@@ -1648,6 +1655,17 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
decode_configs(c);
spram_config();
+
+ switch (__get_cpu_type(c->cputype)) {
+ case CPU_I6500:
+ c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES;
+ /* fall-through */
+ case CPU_I6400:
+ c->options |= MIPS_CPU_SHARED_FTLB_RAM;
+ /* fall-through */
+ default:
+ break;
+ }
}
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
@@ -1831,6 +1849,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
+ case PRID_REV_LOONGSON3A_R3:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
}
decode_configs(c);
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 659e6d3ae335..cb0c57f860d4 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -265,15 +265,34 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
u32 val;
preempt_disable();
- curr_core = current_cpu_data.core;
- spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
- per_cpu(cm_core_lock_flags, curr_core));
if (mips_cm_revision() >= CM_REV_CM3) {
val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+
+ /*
+ * We need to disable interrupts in SMP systems in order to
+ * ensure that we don't interrupt the caller with code which
+ * may modify the redirect register. We do so here in a
+ * slightly obscure way by using a spin lock, since this has
+ * the neat property of also catching any nested uses of
+ * mips_cm_lock_other() leading to a deadlock or a nice warning
+ * with lockdep enabled.
+ */
+ spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
} else {
- BUG_ON(vp != 0);
+ WARN_ON(vp != 0);
+
+ /*
+ * We only have a GCR_CL_OTHER per core in systems with
+ * CM 2.5 & older, so have to ensure other VP(E)s don't
+ * race with us.
+ */
+ curr_core = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+
val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
}
@@ -288,10 +307,17 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
void mips_cm_unlock_other(void)
{
- unsigned curr_core = current_cpu_data.core;
+ unsigned int curr_core;
+
+ if (mips_cm_revision() < CM_REV_CM3) {
+ curr_core = current_cpu_data.core;
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+ } else {
+ spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
+ }
- spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
- per_cpu(cm_core_lock_flags, curr_core));
preempt_enable();
}
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
deleted file mode 100644
index 781168834456..000000000000
--- a/arch/mips/kernel/module-rela.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) 2001 Rusty Russell.
- * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2005 Thiemo Seufer
- * Copyright (C) 2015 Imagination Technologies Ltd.
- */
-
-#include <linux/elf.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/moduleloader.h>
-
-extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v);
-
-static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = v;
-
- return 0;
-}
-
-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n",
- me->name);
- return -ENOEXEC;
- }
-
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- pr_err("module %s: relocation overflow\n", me->name);
- return -ENOEXEC;
- }
-
- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
-
- return 0;
-}
-
-static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x8000LL) >> 16) & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = (*location & 0xffff0000) | (v & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_pc_rela(struct module *me, u32 *location, Elf_Addr v,
- unsigned bits)
-{
- unsigned long mask = GENMASK(bits - 1, 0);
- unsigned long se_bits;
- long offset;
-
- if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_PC%u RELA relocation\n",
- me->name, bits);
- return -ENOEXEC;
- }
-
- offset = ((long)v - (long)location) >> 2;
-
- /* check the sign bit onwards are identical - ie. we didn't overflow */
- se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
- if ((offset & ~mask) != (se_bits & ~mask)) {
- pr_err("module %s: relocation overflow\n", me->name);
- return -ENOEXEC;
- }
-
- *location = (*location & ~mask) | (offset & mask);
-
- return 0;
-}
-
-static int apply_r_mips_pc16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- return apply_r_mips_pc_rela(me, location, v, 16);
-}
-
-static int apply_r_mips_pc21_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- return apply_r_mips_pc_rela(me, location, v, 21);
-}
-
-static int apply_r_mips_pc26_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- return apply_r_mips_pc_rela(me, location, v, 26);
-}
-
-static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *(Elf_Addr *)location = v;
-
- return 0;
-}
-
-static int apply_r_mips_higher_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_highest_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
-
- return 0;
-}
-
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32_rela,
- [R_MIPS_26] = apply_r_mips_26_rela,
- [R_MIPS_HI16] = apply_r_mips_hi16_rela,
- [R_MIPS_LO16] = apply_r_mips_lo16_rela,
- [R_MIPS_PC16] = apply_r_mips_pc16_rela,
- [R_MIPS_64] = apply_r_mips_64_rela,
- [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
- [R_MIPS_HIGHEST] = apply_r_mips_highest_rela,
- [R_MIPS_PC21_S2] = apply_r_mips_pc21_rela,
- [R_MIPS_PC26_S2] = apply_r_mips_pc26_rela,
-};
-
-int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
-{
- Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- int (*handler)(struct module *me, u32 *location, Elf_Addr v);
- Elf_Sym *sym;
- u32 *location;
- unsigned int i, type;
- Elf_Addr v;
- int res;
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /* This is the symbol it is referring to */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_MIPS_R_SYM(rel[i]);
- if (sym->st_value >= -MAX_ERRNO) {
- /* Ignore unresolved weak symbol */
- if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
- continue;
- pr_warn("%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
- return -ENOENT;
- }
-
- type = ELF_MIPS_R_TYPE(rel[i]);
-
- if (type < ARRAY_SIZE(reloc_handlers_rela))
- handler = reloc_handlers_rela[type];
- else
- handler = NULL;
-
- if (!handler) {
- pr_err("%s: Unknown relocation type %u\n",
- me->name, type);
- return -EINVAL;
- }
-
- v = sym->st_value + rel[i].r_addend;
- res = handler(me, location, v);
- if (res)
- return res;
- }
-
- return 0;
-}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 50c020c47e54..491605137b03 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -53,22 +53,25 @@ void *module_alloc(unsigned long size)
}
#endif
-int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_none(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
return 0;
}
-static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_32(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- *location += v;
+ *location = base + v;
return 0;
}
-static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
me->name);
return -ENOEXEC;
}
@@ -80,15 +83,22 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((base + (v >> 2)) & 0x03ffffff);
return 0;
}
-static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_hi16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
struct mips_hi16 *n;
+ if (rela) {
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ return 0;
+ }
+
/*
* We cannot relocate this one now because we don't know the value of
* the carry we need to add. Save the information, and let LO16 do the
@@ -117,12 +127,18 @@ static void free_relocation_chain(struct mips_hi16 *l)
}
}
-static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_lo16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- unsigned long insnlo = *location;
+ unsigned long insnlo = base;
struct mips_hi16 *l;
Elf_Addr val, vallo;
+ if (rela) {
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+ return 0;
+ }
+
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
@@ -178,26 +194,26 @@ out_danger:
free_relocation_chain(l);
me->arch.r_mips_hi16_list = NULL;
- pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
+ pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name);
return -ENOEXEC;
}
-static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v,
- unsigned bits)
+static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
+ Elf_Addr v, unsigned int bits)
{
unsigned long mask = GENMASK(bits - 1, 0);
unsigned long se_bits;
long offset;
if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_PC%u REL relocation\n",
+ pr_err("module %s: dangerous R_MIPS_PC%u relocation\n",
me->name, bits);
return -ENOEXEC;
}
- /* retrieve & sign extend implicit addend */
- offset = *location & mask;
+ /* retrieve & sign extend implicit addend if any */
+ offset = base & mask;
offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
offset += ((long)v - (long)location) >> 2;
@@ -214,99 +230,192 @@ static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v,
return 0;
}
-static int apply_r_mips_pc16_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_pc16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 16);
+}
+
+static int apply_r_mips_pc21(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 21);
+}
+
+static int apply_r_mips_pc26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 26);
+}
+
+static int apply_r_mips_64(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- return apply_r_mips_pc_rel(me, location, v, 16);
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *(Elf_Addr *)location = v;
+
+ return 0;
}
-static int apply_r_mips_pc21_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_higher(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- return apply_r_mips_pc_rel(me, location, v, 21);
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
}
-static int apply_r_mips_pc26_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_highest(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- return apply_r_mips_pc_rel(me, location, v, 26);
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
}
-static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
+/**
+ * reloc_handler() - Apply a particular relocation to a module
+ * @me: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @base: the existing value at location for REL-style; 0 for RELA-style
+ * @v: the value of the reloc, with addend for RELA-style
+ *
+ * Each implemented reloc_handler function applies a particular type of
+ * relocation to the module @me. Relocs that may be found in either REL or RELA
+ * variants can be handled by making use of the @base & @v parameters which are
+ * set to values which abstract the difference away from the particular reloc
+ * implementations.
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_handler)(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela);
+
+/* The handlers for known reloc types */
+static reloc_handler reloc_handlers[] = {
[R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32_rel,
- [R_MIPS_26] = apply_r_mips_26_rel,
- [R_MIPS_HI16] = apply_r_mips_hi16_rel,
- [R_MIPS_LO16] = apply_r_mips_lo16_rel,
- [R_MIPS_PC16] = apply_r_mips_pc16_rel,
- [R_MIPS_PC21_S2] = apply_r_mips_pc21_rel,
- [R_MIPS_PC26_S2] = apply_r_mips_pc26_rel,
+ [R_MIPS_32] = apply_r_mips_32,
+ [R_MIPS_26] = apply_r_mips_26,
+ [R_MIPS_HI16] = apply_r_mips_hi16,
+ [R_MIPS_LO16] = apply_r_mips_lo16,
+ [R_MIPS_PC16] = apply_r_mips_pc16,
+ [R_MIPS_64] = apply_r_mips_64,
+ [R_MIPS_HIGHER] = apply_r_mips_higher,
+ [R_MIPS_HIGHEST] = apply_r_mips_highest,
+ [R_MIPS_PC21_S2] = apply_r_mips_pc21,
+ [R_MIPS_PC26_S2] = apply_r_mips_pc26,
};
-int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
+static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me, bool rela)
{
- Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
- int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+ union {
+ Elf_Mips_Rel *rel;
+ Elf_Mips_Rela *rela;
+ } r;
+ reloc_handler handler;
Elf_Sym *sym;
- u32 *location;
+ u32 *location, base;
unsigned int i, type;
Elf_Addr v;
- int res;
+ int err = 0;
+ size_t reloc_sz;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ r.rel = (void *)sechdrs[relsec].sh_addr;
+ reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel);
me->arch.r_mips_hi16_list = NULL;
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
+ + r.rel->r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_MIPS_R_SYM(rel[i]);
+ + ELF_MIPS_R_SYM(*r.rel);
if (sym->st_value >= -MAX_ERRNO) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
pr_warn("%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
- return -ENOENT;
+ err = -ENOENT;
+ goto out;
}
- type = ELF_MIPS_R_TYPE(rel[i]);
-
- if (type < ARRAY_SIZE(reloc_handlers_rel))
- handler = reloc_handlers_rel[type];
+ type = ELF_MIPS_R_TYPE(*r.rel);
+ if (type < ARRAY_SIZE(reloc_handlers))
+ handler = reloc_handlers[type];
else
handler = NULL;
if (!handler) {
pr_err("%s: Unknown relocation type %u\n",
me->name, type);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- v = sym->st_value;
- res = handler(me, location, v);
- if (res)
- return res;
+ if (rela) {
+ v = sym->st_value + r.rela->r_addend;
+ base = 0;
+ r.rela = &r.rela[1];
+ } else {
+ v = sym->st_value;
+ base = *location;
+ r.rel = &r.rel[1];
+ }
+
+ err = handler(me, location, base, v, rela);
+ if (err)
+ goto out;
}
+out:
/*
- * Normally the hi16 list should be deallocated at this point. A
+ * Normally the hi16 list should be deallocated at this point. A
* malformed binary however could contain a series of R_MIPS_HI16
- * relocations not followed by a R_MIPS_LO16 relocation. In that
- * case, free up the list and return an error.
+ * relocations not followed by a R_MIPS_LO16 relocation, or if we hit
+ * an error processing a reloc we might have gotten here before
+ * reaching the R_MIPS_LO16. In either case, free up the list and
+ * return an error.
*/
if (me->arch.r_mips_hi16_list) {
free_relocation_chain(me->arch.r_mips_hi16_list);
me->arch.r_mips_hi16_list = NULL;
-
- return -ENOEXEC;
+ err = err ?: -ENOEXEC;
}
- return 0;
+ return err;
+}
+
+int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false);
+}
+
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true);
}
+#endif /* CONFIG_MODULES_USE_ELF_RELA */
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
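The REL/RELA unification above hinges on where the addend lives: REL keeps it in the word being patched, RELA carries it in the relocation record. A hedged sketch of the resulting single code path for R_MIPS_32 (plain C, outside the kernel):

    #include <stdint.h>

    /* Sketch: with base = *location for REL (else 0) and v carrying
     * the explicit addend for RELA, one expression serves both styles.
     */
    static void r_mips_32(uint32_t *location, uint32_t sym,
                          uint32_t addend, int rela)
    {
            uint32_t base = rela ? 0 : *location;
            uint32_t v = sym + (rela ? addend : 0);

            *location = base + v;
    }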
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index f3e301f95aef..9e6c74bf66c4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -814,7 +814,7 @@ static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};
-static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = {
+static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
/* These only count dcache, not icache */
@@ -1014,7 +1014,7 @@ static const struct mips_perf_event mipsxxcore_cache_map2
},
};
-static const struct mips_perf_event i6400_cache_map
+static const struct mips_perf_event i6x00_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -1610,6 +1610,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_I6400:
+ case CPU_I6500:
/* 8-bit event numbers */
base_id = config & 0xff;
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1770,8 +1771,13 @@ init_hw_perf_events(void)
break;
case CPU_I6400:
mipspmu.name = "mips/I6400";
- mipspmu.general_event_map = &i6400_event_map;
- mipspmu.cache_event_map = &i6400_cache_map;
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
+ break;
+ case CPU_I6500:
+ mipspmu.name = "mips/I6500";
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4eff2aed7360..70604c753aa4 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
seq_printf(m, "isa\t\t\t:");
- if (cpu_has_mips_r1)
+ if (cpu_has_mips_1)
seq_printf(m, " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
@@ -109,6 +109,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "ASEs implemented\t:");
if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
+ if (cpu_has_mips16e2) seq_printf(m, "%s", " mips16e2");
if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx");
if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d");
if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips");
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6931fe722a0b..6dd13641a418 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -868,14 +868,39 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
tracehook_report_syscall_entry(regs))
return -1;
- if (secure_computing(NULL) == -1)
- return -1;
+#ifdef CONFIG_SECCOMP
+ if (unlikely(test_thread_flag(TIF_SECCOMP))) {
+ int ret, i;
+ struct seccomp_data sd;
+
+ sd.nr = syscall;
+ sd.arch = syscall_get_arch();
+ for (i = 0; i < 6; i++) {
+ unsigned long v, r;
+
+ r = mips_get_syscall_arg(&v, current, regs, i);
+ sd.args[i] = r ? 0 : v;
+ }
+ sd.instruction_pointer = KSTK_EIP(current);
+
+ ret = __secure_computing(&sd);
+ if (ret == -1)
+ return ret;
+ }
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
+
+ /*
+ * Negative syscall numbers are mistaken for rejected syscalls, but
+ * won't have had the return value set appropriately, so we do so now.
+ */
+ if (syscall < 0)
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
return syscall;
}
@@ -895,7 +920,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[2]);
+ trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 80ed68b2c95e..27c2f90eeb21 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -371,7 +371,7 @@ EXPORT(sys_call_table)
PTR sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 49765b44aa9b..65d5aeeb9bdb 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -311,7 +311,7 @@ EXPORT(sys_call_table)
PTR sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 90bad2d1b2d3..cbf190ef9e8a 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -302,7 +302,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
PTR compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 2dd70bd104e1..c30bc520885f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -371,7 +371,7 @@ EXPORT(sys32_call_table)
PTR compat_sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 01d1dbde5fbf..fe3939726765 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -670,6 +670,46 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
+static int __init early_parse_memmap(char *p)
+{
+ char *oldp;
+ u64 start_at, mem_size;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!strncmp(p, "exactmap", 8)) {
+ pr_err("\"memmap=exactmap\" invalid on MIPS\n");
+ return 0;
+ }
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return -EINVAL;
+
+ if (*p == '@') {
+ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
+ } else if (*p == '#') {
+ pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
+ return -EINVAL;
+ } else if (*p == '$') {
+ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
+ } else {
+ pr_err("\"memmap\" invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (*p == '\0') {
+ usermem = 1;
+ return 0;
+ } else
+ return -EINVAL;
+}
+early_param("memmap", early_parse_memmap);
+
#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
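For reference, the forms the parser above accepts look like this on the kernel command line (addresses illustrative):

    memmap=64M@0x20000000    add 64MB of RAM at that physical address
    memmap=16M$0x1f000000    mark 16MB starting there as reserved

memmap=exactmap and the ACPI form memmap=nn#ss are rejected on MIPS, as is any trailing text after the size/address pair.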
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 36954ddd0b9f..f832e99ad4c3 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -142,9 +142,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/* Warn the user if the CCA prevents multi-core */
ncores = mips_cm_numcores();
- if (cca_unsuitable && ncores > 1) {
- pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
- cca);
+ if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) {
+ pr_warn("Using only one core due to %s%s%s\n",
+ cca_unsuitable ? "unsuitable CCA" : "",
+ (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
+ cpu_has_dc_aliases ? "dcache aliasing" : "");
for_each_present_cpu(c) {
if (cpu_data[c].core)
@@ -488,6 +490,7 @@ static void cps_cpu_die(unsigned int cpu)
{
unsigned core = cpu_data[cpu].core;
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ ktime_t fail_time;
unsigned stat;
int err;
@@ -514,6 +517,7 @@ static void cps_cpu_die(unsigned int cpu)
* state, the latter happening when a JTAG probe is connected
* in which case the CPC will refuse to power down the core.
*/
+ fail_time = ktime_add_ms(ktime_get(), 2000);
do {
mips_cm_lock_other(core, 0);
mips_cpc_lock_other(core);
@@ -521,9 +525,28 @@ static void cps_cpu_die(unsigned int cpu)
stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
mips_cpc_unlock_other();
mips_cm_unlock_other();
- } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
- stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
- stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+ if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
+ break;
+
+ /*
+ * The core ought to have powered down, but didn't &
+ * now we don't really know what state it's in. It's
+ * likely that its _pwr_up pin has been wired to logic
+ * 1 & it powered back up as soon as we powered it
+ * down...
+ *
+ * The best we can do is warn the user & continue in
+ * the hope that the core is doing nothing harmful &
+ * might behave properly if we online it later.
+ */
+ if (WARN(ktime_after(ktime_get(), fail_time),
+ "CPU%u hasn't powered down, seq. state %u\n",
+ cpu, stat >> CPC_Cx_STAT_CONF_SEQSTATE_SHF))
+ break;
+ } while (1);
/* Indicate the core is powered off */
bitmap_clear(core_power, core, 1);
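The loop above is an instance of a bounded-poll pattern: compute an absolute deadline once, then poll until either the desired state appears or the deadline passes. A hedged generic sketch, where read_state() and DESIRED are hypothetical stand-ins:

    /* Sketch: bounded poll with a one-shot warning, kernel-style. */
    ktime_t deadline = ktime_add_ms(ktime_get(), 2000);

    do {
            if (read_state() == DESIRED)    /* hypothetical */
                    break;
            if (WARN(ktime_after(ktime_get(), deadline),
                     "state not reached within 2s\n"))
                    break;
    } while (1);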
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index aba1afb64b62..770d4d1516cb 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -335,6 +335,9 @@ int mips_smp_ipi_free(const struct cpumask *mask)
static int __init mips_smp_ipi_init(void)
{
+ if (num_possible_cpus() == 1)
+ return 0;
+
mips_smp_ipi_allocate(cpu_possible_mask);
call_desc = irq_to_desc(call_virq);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dfa7f5796c7..58c6f634b550 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -29,6 +29,7 @@
#include <linux/sched/task_stack.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -131,16 +132,14 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
__asm__ __volatile__ (
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
- "1: ll %[old], (%[addr]) \n"
+ "1: \n"
+ user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
- "2: sc %[tmp], (%[addr]) \n"
- " bnez %[tmp], 4f \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 1b \n"
"3: \n"
" .insn \n"
- " .subsection 2 \n"
- "4: b 1b \n"
- " .previous \n"
- " \n"
" .section .fixup,\"ax\" \n"
"5: li %[err], %[efault] \n"
" j 3b \n"
@@ -192,6 +191,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
unreachable();
}
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
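The reworked mips_atomic_set() loop branches straight back to the LL on SC failure instead of bouncing through a subsection. The same retry shape in portable C, with a weak compare-exchange standing in for LL/SC (hedged sketch, not the kernel path):

    #include <stdint.h>

    /* Sketch: retry from the load on failure, as "beqz %[tmp], 1b"
     * does above; old is refreshed on each failed exchange.
     */
    static uint32_t atomic_set_u32(uint32_t *addr, uint32_t new_val)
    {
            uint32_t old = __atomic_load_n(addr, __ATOMIC_RELAXED);

            while (!__atomic_compare_exchange_n(addr, &old, new_val, 1,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                    ;
            return old;
    }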
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index f806ee56e639..5eaf2578ac04 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -939,88 +939,114 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* The remaining opcodes are the ones that are really of
* interest.
*/
-#ifdef CONFIG_EVA
case spec3_op:
- /*
- * we can land here only from kernel accessing user memory,
- * so we need to "switch" the address limit to user space, so
- * address check can work properly.
- */
- seg = get_fs();
- set_fs(USER_DS);
- switch (insn.spec3_format.func) {
- case lhe_op:
- if (!access_ok(VERIFY_READ, addr, 2)) {
- set_fs(seg);
- goto sigbus;
- }
- LoadHWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- compute_return_epc(regs);
- regs->regs[insn.spec3_format.rt] = value;
- break;
- case lwe_op:
- if (!access_ok(VERIFY_READ, addr, 4)) {
- set_fs(seg);
- goto sigbus;
+ if (insn.dsp_format.func == lx_op) {
+ switch (insn.dsp_format.op) {
+ case lwx_op:
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ case lhx_op:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ default:
+ goto sigill;
}
+ }
+#ifdef CONFIG_EVA
+ else {
+ /*
+ * we can land here only from kernel accessing user
+ * memory, so we need to "switch" the address limit to
+ * user space, so that the address check can work properly.
+ */
+ seg = get_fs();
+ set_fs(USER_DS);
+ switch (insn.spec3_format.func) {
+ case lhe_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lwe_op:
+ if (!access_ok(VERIFY_READ, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
LoadWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- compute_return_epc(regs);
- regs->regs[insn.spec3_format.rt] = value;
- break;
- case lhue_op:
- if (!access_ok(VERIFY_READ, addr, 2)) {
- set_fs(seg);
- goto sigbus;
- }
- LoadHWUE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- compute_return_epc(regs);
- regs->regs[insn.spec3_format.rt] = value;
- break;
- case she_op:
- if (!access_ok(VERIFY_WRITE, addr, 2)) {
- set_fs(seg);
- goto sigbus;
- }
- compute_return_epc(regs);
- value = regs->regs[insn.spec3_format.rt];
- StoreHWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- break;
- case swe_op:
- if (!access_ok(VERIFY_WRITE, addr, 4)) {
- set_fs(seg);
- goto sigbus;
- }
- compute_return_epc(regs);
- value = regs->regs[insn.spec3_format.rt];
- StoreWE(addr, value, res);
- if (res) {
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lhue_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHWUE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case she_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ case swe_op:
+ if (!access_ok(VERIFY_WRITE, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ default:
set_fs(seg);
- goto fault;
+ goto sigill;
}
- break;
- default:
set_fs(seg);
- goto sigill;
}
- set_fs(seg);
- break;
#endif
+ break;
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
@@ -1984,6 +2010,8 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
u16 __user *pc16;
unsigned long origpc;
union mips16e_instruction mips16inst, oldinst;
+ unsigned int opcode;
+ int extended = 0;
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
@@ -1996,6 +2024,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
/* skip EXTEND instruction */
if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+ extended = 1;
pc16++;
__get_user(mips16inst.full, pc16);
} else if (delay_slot(regs)) {
@@ -2008,7 +2037,8 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
goto sigbus;
}
- switch (mips16inst.ri.opcode) {
+ opcode = mips16inst.ri.opcode;
+ switch (opcode) {
case MIPS16e_i64_op: /* I64 or RI64 instruction */
switch (mips16inst.i64.func) { /* I64/RI64 func field check */
case MIPS16e_ldpc_func:
@@ -2028,9 +2058,40 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
goto sigbus;
case MIPS16e_swsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* SWSP */
+ case 1: /* SWGP */
+ break;
+ case 2: /* SHGP */
+ opcode = MIPS16e_sh_op;
+ break;
+ default:
+ goto sigbus;
+ }
+ break;
+
case MIPS16e_lwpc_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ break;
+
case MIPS16e_lwsp_op:
reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* LWSP */
+ case 1: /* LWGP */
+ break;
+ case 2: /* LHGP */
+ opcode = MIPS16e_lh_op;
+ break;
+ case 4: /* LHUGP */
+ opcode = MIPS16e_lhu_op;
+ break;
+ default:
+ goto sigbus;
+ }
break;
case MIPS16e_i8_op:
@@ -2044,7 +2105,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
break;
}
- switch (mips16inst.ri.opcode) {
+ switch (opcode) {
case MIPS16e_lb_op:
case MIPS16e_lbu_op:
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 3114a2ed1f4e..03e3304d6ae5 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -28,6 +28,9 @@
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif
+#ifdef CONFIG_CPU_MIPSR6
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
#include <asm/asm.h>
#include <asm/asm-offsets.h>
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 6afa21850267..1e8a955ae5a8 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -90,7 +90,9 @@ void __init prom_init_env(void)
cpu_clock_freq = ecpu->cpu_clock_freq;
loongson_sysconf.cputype = ecpu->cputype;
- if (ecpu->cputype == Loongson_3A) {
+ switch (ecpu->cputype) {
+ case Legacy_3A:
+ case Loongson_3A:
loongson_sysconf.cores_per_node = 4;
loongson_sysconf.cores_per_package = 4;
smp_group[0] = 0x900000003ff01000;
@@ -111,7 +113,9 @@ void __init prom_init_env(void)
loongson_freqctrl[3] = 0x900030001fe001d0;
loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
- } else if (ecpu->cputype == Loongson_3B) {
+ break;
+ case Legacy_3B:
+ case Loongson_3B:
loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */
loongson_sysconf.cores_per_package = 8;
smp_group[0] = 0x900000003ff01000;
@@ -132,7 +136,8 @@ void __init prom_init_env(void)
loongson_freqctrl[3] = 0x900060001fe001d0;
loongson_sysconf.ht_control_base = 0x90001EFDFB000000;
loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG;
- } else {
+ break;
+ default:
loongson_sysconf.cores_per_node = 1;
loongson_sysconf.cores_per_package = 1;
loongson_chipcfg[0] = 0x900000001fe00180;
@@ -193,6 +198,7 @@ void __init prom_init_env(void)
break;
case PRID_REV_LOONGSON3A_R1:
case PRID_REV_LOONGSON3A_R2:
+ case PRID_REV_LOONGSON3A_R3:
cpu_clock_freq = 900000000;
break;
case PRID_REV_LOONGSON3B_R1:
diff --git a/arch/mips/loongson64/common/init.c b/arch/mips/loongson64/common/init.c
index 9b987fe98b5b..6ef17120722f 100644
--- a/arch/mips/loongson64/common/init.c
+++ b/arch/mips/loongson64/common/init.c
@@ -10,13 +10,25 @@
#include <linux/bootmem.h>
#include <asm/bootinfo.h>
+#include <asm/traps.h>
#include <asm/smp-ops.h>
+#include <asm/cacheflush.h>
#include <loongson.h>
/* Loongson CPU address windows config space base address */
unsigned long __maybe_unused _loongson_addrwincfg_base;
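+/*
+ * Install the generic NMI exception stub: copy it into place and
+ * flush the range so instruction fetch sees the new handler.
+ */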
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi;
+
+ base = (void *)(CAC_BASE + 0x380);
+ memcpy(base, &except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
void __init prom_init(void)
{
#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
@@ -40,6 +52,7 @@ void __init prom_init(void)
/*init the uart base address */
prom_init_uart_base();
register_smp_ops(&loongson3_smp_ops);
+ board_nmi_handler_setup = mips_nmi_setup;
}
void __init prom_free_prom_memory(void)
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
index 548f759454dc..7202e52cd046 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/loongson-3/irq.c
@@ -9,18 +9,69 @@
#include "smp.h"
+extern void loongson3_send_irq_by_ipi(int cpu, int irqs);
+
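+/* CPU that last serviced each HT IRQ; -1 (as unsigned) means none yet */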
+unsigned int irq_cpu[16] = {[0 ... 15] = -1};
unsigned int ht_irq[] = {0, 1, 3, 4, 5, 6, 7, 8, 12, 14, 15};
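+/* HT IRQs that are always handled on the core that receives them */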
+unsigned int local_irq = 1<<0 | 1<<1 | 1<<2 | 1<<7 | 1<<8 | 1<<12;
+
+int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+ bool force)
+{
+ unsigned int cpu;
+ struct cpumask new_affinity;
+
+ /* I/O devices are connected on package-0 */
+ cpumask_copy(&new_affinity, affinity);
+ for_each_cpu(cpu, affinity)
+ if (cpu_data[cpu].package > 0)
+ cpumask_clear_cpu(cpu, &new_affinity);
+
+ if (cpumask_empty(&new_affinity))
+ return -EINVAL;
+
+ cpumask_copy(d->common->affinity, &new_affinity);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
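+/*
+ * HT1 dispatch policy: IRQs in local_irq are handled on the receiving
+ * core; the rest are rotated round-robin over the cores in the IRQ's
+ * affinity mask, with remote cores kicked via IPI.
+ */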
static void ht_irqdispatch(void)
{
unsigned int i, irq;
+ struct irq_data *irqd;
+ struct cpumask affinity;
irq = LOONGSON_HT1_INT_VECTOR(0);
LOONGSON_HT1_INT_VECTOR(0) = irq; /* Acknowledge the IRQs */
for (i = 0; i < ARRAY_SIZE(ht_irq); i++) {
- if (irq & (0x1 << ht_irq[i]))
+ if (!(irq & (0x1 << ht_irq[i])))
+ continue;
+
+ /* handled by local core */
+ if (local_irq & (0x1 << ht_irq[i])) {
do_IRQ(ht_irq[i]);
+ continue;
+ }
+
+ irqd = irq_get_irq_data(ht_irq[i]);
+ cpumask_and(&affinity, irqd->common->affinity, cpu_active_mask);
+ if (cpumask_empty(&affinity)) {
+ do_IRQ(ht_irq[i]);
+ continue;
+ }
+
+ irq_cpu[ht_irq[i]] = cpumask_next(irq_cpu[ht_irq[i]], &affinity);
+ if (irq_cpu[ht_irq[i]] >= nr_cpu_ids)
+ irq_cpu[ht_irq[i]] = cpumask_first(&affinity);
+
+ if (irq_cpu[ht_irq[i]] == 0) {
+ do_IRQ(ht_irq[i]);
+ continue;
+ }
+
+ /* balanced by other cores */
+ loongson3_send_irq_by_ipi(irq_cpu[ht_irq[i]], (0x1 << ht_irq[i]));
}
}
@@ -120,11 +171,16 @@ void irq_router_init(void)
void __init mach_init_irq(void)
{
+ struct irq_chip *chip;
+
clear_c0_status(ST0_IM | ST0_BEV);
irq_router_init();
mips_cpu_irq_init();
init_i8259_irqs();
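+ /* Route i8259 affinity changes through plat_set_irq_affinity() */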
+ chip = irq_get_chip(I8259A_IRQ_BASE);
+ chip->irq_set_affinity = plat_set_irq_affinity;
+
irq_set_chip_and_handler(LOONGSON_UART_IRQ,
&loongson_irq_chip, handle_level_irq);
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 64659fc73940..b7a355c3c408 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -254,13 +254,21 @@ loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(i)]);
}
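+/*
+ * IPI status bits at and above IPI_IRQ_OFFSET carry HT IRQ numbers
+ * forwarded from another core rather than plain IPI actions.
+ */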
+#define IPI_IRQ_OFFSET 6
+
+void loongson3_send_irq_by_ipi(int cpu, int irqs)
+{
+ loongson3_ipi_write32(irqs << IPI_IRQ_OFFSET, ipi_set0_regs[cpu_logical_map(cpu)]);
+}
+
void loongson3_ipi_interrupt(struct pt_regs *regs)
{
int i, cpu = smp_processor_id();
- unsigned int action, c0count;
+ unsigned int action, c0count, irqs;
/* Load the ipi register to figure out what we're supposed to do */
action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
+ irqs = action >> IPI_IRQ_OFFSET;
/* Clear the ipi register to clear the interrupt */
loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]);
@@ -282,6 +290,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
core0_c0count[i] = c0count;
__wbflush(); /* Let others see the result ASAP */
}
+
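+ /* Service any HT IRQs that another core forwarded to us via IPI */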
+ if (irqs) {
+ int irq;
+ while ((irq = ffs(irqs))) {
+ do_IRQ(irq-1);
+ irqs &= ~(1<<(irq-1));
+ }
+ }
}
#define MAX_LOOPS 800
@@ -503,7 +519,7 @@ static void loongson3a_r1_play_dead(int *state_addr)
: "a1");
}
-static void loongson3a_r2_play_dead(int *state_addr)
+static void loongson3a_r2r3_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
@@ -664,8 +680,9 @@ void play_dead(void)
(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
break;
case PRID_REV_LOONGSON3A_R2:
+ case PRID_REV_LOONGSON3A_R3:
play_dead_at_ckseg1 =
- (void *)CKSEG1ADDR((unsigned long)loongson3a_r2_play_dead);
+ (void *)CKSEG1ADDR((unsigned long)loongson3a_r2r3_play_dead);
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index f12fde10c8ad..f08a7b4facb9 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1142,7 +1142,7 @@ emul:
case mfhc_op:
if (!cpu_has_mips_r2_r6)
- goto sigill;
+ return SIGILL;
/* copregister rd -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
@@ -1153,7 +1153,7 @@ emul:
case mthc_op:
if (!cpu_has_mips_r2_r6)
- goto sigill;
+ return SIGILL;
/* copregister rd <- gpr[rt] */
SITOHREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
@@ -1376,7 +1376,6 @@ branch_common:
xcp->regs[MIPSInst_RS(ir)];
break;
default:
-sigill:
return SIGILL;
}
@@ -2524,6 +2523,35 @@ dcopuop:
return 0;
}
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have typically been called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized. In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
@@ -2609,6 +2637,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
if (sig)
break;
+ /*
+ * We have to check for the ISA bit explicitly here,
+ * because `get_isa16_mode' may return 0 if support
+ * for code compression has been globally disabled,
+ * or otherwise we may produce the wrong signal or
+ * even proceed successfully where we must not.
+ */
+ if ((xcp->cp0_epc ^ prevepc) & 0x1)
+ break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 3fe99cb271a9..81d6a15c93d0 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1453,6 +1453,7 @@ static void probe_pcache(void)
case CPU_20KC:
case CPU_25KF:
case CPU_I6400:
+ case CPU_I6500:
case CPU_SB1:
case CPU_SB1A:
case CPU_XLR:
@@ -1512,6 +1513,7 @@ static void probe_pcache(void)
case CPU_ALCHEMY:
case CPU_I6400:
+ case CPU_I6500:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index ed1c5297547a..5aadc69c8ce3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -153,8 +153,7 @@ static int scratchpad_offset(int i)
*/
static int m4kc_tlbp_war(void)
{
- return (current_cpu_data.processor_id & 0xffff00) ==
- (PRID_COMP_MIPS | PRID_IMP_4KC);
+ return current_cpu_type() == CPU_4KC;
}
/* Handle labels (which must be positive integers). */
@@ -2015,6 +2014,26 @@ static void build_r3000_tlb_modify_handler(void)
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
+static bool cpu_has_tlbex_tlbp_race(void)
+{
+ /*
+ * When a Hardware Table Walker is running it can replace TLB entries
+ * at any time, leading to a race between it & the CPU.
+ */
+ if (cpu_has_htw)
+ return true;
+
+ /*
+ * If the CPU shares FTLB RAM with its siblings then our entry may be
+ * replaced at any time by a sibling performing a write to the FTLB.
+ */
+ if (cpu_has_shared_ftlb_ram)
+ return true;
+
+ /* In all other cases there ought to be no race condition to handle */
+ return false;
+}
+
/*
* R4000 style TLB load/store/modify handlers.
*/
@@ -2051,7 +2070,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
if (!m4kc_tlbp_war()) {
build_tlb_probe_entry(p);
- if (cpu_has_htw) {
+ if (cpu_has_tlbex_tlbp_race()) {
/* race condition happens, leaving */
uasm_i_ehb(p);
uasm_i_mfc0(p, wr.r3, C0_INDEX);
@@ -2125,6 +2144,14 @@ static void build_r4000_tlb_load_handler(void)
}
uasm_i_nop(&p);
+ /*
+ * Warn if something may race with us & replace the TLB entry
+ * before we read it here. Everything with such races should
+ * also have dedicated RiXi exception handlers, so this
+ * shouldn't be hit.
+ */
+ WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
+
uasm_i_tlbr(&p);
switch (current_cpu_type()) {
@@ -2192,6 +2219,14 @@ static void build_r4000_tlb_load_handler(void)
}
uasm_i_nop(&p);
+ /*
+ * Warn if something may race with us & replace the TLB entry
+ * before we read it here. Everything with such races should
+ * also have dedicated RiXi exception handlers, so this
+ * shouldn't be hit.
+ */
+ WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
+
uasm_i_tlbr(&p);
switch (current_cpu_type()) {
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 277cf52d80e1..c28ff53c8da0 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -40,93 +40,92 @@
#include "uasm.c"
-static struct insn insn_table_MM[] = {
- { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
- { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
- { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
- { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
- { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beql, 0, 0 },
- { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
- { insn_bgezl, 0, 0 },
- { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
- { insn_bltzl, 0, 0 },
- { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
- { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
- { insn_cfc1, M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS },
- { insn_cfcmsa, M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE },
- { insn_ctc1, M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS },
- { insn_ctcmsa, M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE },
- { insn_daddu, 0, 0 },
- { insn_daddiu, 0, 0 },
- { insn_di, M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS },
- { insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
- { insn_dmfc0, 0, 0 },
- { insn_dmtc0, 0, 0 },
- { insn_dsll, 0, 0 },
- { insn_dsll32, 0, 0 },
- { insn_dsra, 0, 0 },
- { insn_dsrl, 0, 0 },
- { insn_dsrl32, 0, 0 },
- { insn_drotr, 0, 0 },
- { insn_drotr32, 0, 0 },
- { insn_dsubu, 0, 0 },
- { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
- { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
- { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
- { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS },
- { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
- { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
- { insn_ld, 0, 0 },
- { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
- { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
- { insn_lld, 0, 0 },
- { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
- { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
- { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
- { insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
- { insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
- { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
- { insn_mthi, M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS },
- { insn_mtlo, M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS },
- { insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
- { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
- { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
- { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
- { insn_rfe, 0, 0 },
- { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
- { insn_scd, 0, 0 },
- { insn_sd, 0, 0 },
- { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
- { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
- { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
- { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
- { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
- { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
- { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
- { insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD },
- { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
- { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
- { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
- { insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS },
- { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
- { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
- { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
- { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
- { insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM },
- { insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS },
- { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
- { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
- { insn_dins, 0, 0 },
- { insn_dinsm, 0, 0 },
- { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
- { insn_bbit0, 0, 0 },
- { insn_bbit1, 0, 0 },
- { insn_lwx, 0, 0 },
- { insn_ldx, 0, 0 },
- { insn_invalid, 0, 0 }
+static const struct insn insn_table_MM[insn_invalid] = {
+ [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
+ [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
+ [insn_andi] = {M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
+ [insn_beq] = {M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_beql] = {0, 0},
+ [insn_bgez] = {M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM},
+ [insn_bgezl] = {0, 0},
+ [insn_bltz] = {M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM},
+ [insn_bltzl] = {0, 0},
+ [insn_bne] = {M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM},
+ [insn_cache] = {M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM},
+ [insn_cfc1] = {M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS},
+ [insn_cfcmsa] = {M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE},
+ [insn_ctc1] = {M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS},
+ [insn_ctcmsa] = {M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE},
+ [insn_daddu] = {0, 0},
+ [insn_daddiu] = {0, 0},
+ [insn_di] = {M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS},
+ [insn_divu] = {M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS},
+ [insn_dmfc0] = {0, 0},
+ [insn_dmtc0] = {0, 0},
+ [insn_dsll] = {0, 0},
+ [insn_dsll32] = {0, 0},
+ [insn_dsra] = {0, 0},
+ [insn_dsrl] = {0, 0},
+ [insn_dsrl32] = {0, 0},
+ [insn_drotr] = {0, 0},
+ [insn_drotr32] = {0, 0},
+ [insn_dsubu] = {0, 0},
+ [insn_eret] = {M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0},
+ [insn_ins] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE},
+ [insn_ext] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE},
+ [insn_j] = {M(mm_j32_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jal] = {M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jalr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS},
+ [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
+ [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_ld] = {0, 0},
+ [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
+ [insn_lld] = {0, 0},
+ [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
+ [insn_lw] = {M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_mfc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD},
+ [insn_mfhi] = {M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS},
+ [insn_mflo] = {M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS},
+ [insn_mtc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD},
+ [insn_mthi] = {M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS},
+ [insn_mtlo] = {M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS},
+ [insn_mul] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD},
+ [insn_or] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD},
+ [insn_ori] = {M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
+ [insn_pref] = {M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM},
+ [insn_rfe] = {0, 0},
+ [insn_sc] = {M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM},
+ [insn_scd] = {0, 0},
+ [insn_sd] = {0, 0},
+ [insn_sll] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD},
+ [insn_sllv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD},
+ [insn_slt] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD},
+ [insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD},
+ [insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD},
+ [insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD},
+ [insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD},
+ [insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD},
+ [insn_subu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD},
+ [insn_sw] = {M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_sync] = {M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS},
+ [insn_tlbp] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0},
+ [insn_tlbr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0},
+ [insn_tlbwi] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0},
+ [insn_tlbwr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0},
+ [insn_wait] = {M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM},
+ [insn_wsbh] = {M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS},
+ [insn_xor] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD},
+ [insn_xori] = {M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
+ [insn_dins] = {0, 0},
+ [insn_dinsm] = {0, 0},
+ [insn_syscall] = {M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+ [insn_bbit0] = {0, 0},
+ [insn_bbit1] = {0, 0},
+ [insn_lwx] = {0, 0},
+ [insn_ldx] = {0, 0},
};
#undef M
@@ -156,20 +155,17 @@ static inline u32 build_jimm(u32 arg)
*/
static void build_insn(u32 **buf, enum opcode opc, ...)
{
- struct insn *ip = NULL;
- unsigned int i;
+ const struct insn *ip;
va_list ap;
u32 op;
- for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
- if (insn_table_MM[i].opcode == opc) {
- ip = &insn_table_MM[i];
- break;
- }
-
- if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
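+ /*
+ * The table is indexed directly by opcode; an all-zero entry
+ * marks an instruction unsupported in this mode.
+ */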
+ if (opc < 0 || opc >= insn_invalid ||
+ (opc == insn_daddiu && r4k_daddiu_bug()) ||
+ (insn_table_MM[opc].match == 0 && insn_table_MM[opc].fields == 0))
panic("Unsupported Micro-assembler instruction %d", opc);
+ ip = &insn_table_MM[opc];
+
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS) {
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 2277499fe6ae..3f74f6c1f065 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -48,126 +48,145 @@
#include "uasm.c"
-static struct insn insn_table[] = {
- { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
- { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
- { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
- { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
- { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
- { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
- { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+static const struct insn insn_table[insn_invalid] = {
+ [insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
+ [insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
+ [insn_andi] = {M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
+ [insn_bbit0] = {M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_bbit1] = {M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_beq] = {M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_beql] = {M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_bgez] = {M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM},
+ [insn_bgezl] = {M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM},
+ [insn_bgtz] = {M(bgtz_op, 0, 0, 0, 0, 0), RS | BIMM},
+ [insn_blez] = {M(blez_op, 0, 0, 0, 0, 0), RS | BIMM},
+ [insn_bltz] = {M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM},
+ [insn_bltzl] = {M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM},
+ [insn_bne] = {M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_break] = {M(spec_op, 0, 0, 0, 0, break_op), SCIMM},
#ifndef CONFIG_CPU_MIPSR6
- { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ [insn_cache] = {M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
- { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+ [insn_cache] = {M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9},
#endif
- { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD },
- { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE },
- { insn_ctc1, M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD },
- { insn_ctcmsa, M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE },
- { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
- { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
- { insn_di, M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT },
- { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
- { insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
- { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
- { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
- { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
- { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
- { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
- { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
- { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ [insn_cfc1] = {M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD},
+ [insn_cfcmsa] = {M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE},
+ [insn_ctc1] = {M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD},
+ [insn_ctcmsa] = {M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE},
+ [insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD},
+ [insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT},
+ [insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT},
+ [insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE},
+ [insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE},
+ [insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE},
+ [insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT},
+ [insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
+ [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
+ [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
+ [insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD},
+ [insn_dshd] = {M(spec3_op, 0, 0, 0, dshd_op, dbshfl_op), RT | RD},
+ [insn_dsll] = {M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE},
+ [insn_dsll32] = {M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE},
+ [insn_dsllv] = {M(spec_op, 0, 0, 0, 0, dsllv_op), RS | RT | RD},
+ [insn_dsra] = {M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE},
+ [insn_dsra32] = {M(spec_op, 0, 0, 0, 0, dsra32_op), RT | RD | RE},
+ [insn_dsrav] = {M(spec_op, 0, 0, 0, 0, dsrav_op), RS | RT | RD},
+ [insn_dsrl] = {M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE},
+ [insn_dsrl32] = {M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE},
+ [insn_dsrlv] = {M(spec_op, 0, 0, 0, 0, dsrlv_op), RS | RT | RD},
+ [insn_dsubu] = {M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD},
+ [insn_eret] = {M(cop0_op, cop_op, 0, 0, 0, eret_op), 0},
+ [insn_ext] = {M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE},
+ [insn_ins] = {M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE},
+ [insn_j] = {M(j_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jal] = {M(jal_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jalr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD},
#ifndef CONFIG_CPU_MIPSR6
- { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+ [insn_jr] = {M(spec_op, 0, 0, 0, 0, jr_op), RS},
#else
- { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS },
+ [insn_jr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS},
#endif
- { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lhu, M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ [insn_lb] = {M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lbu] = {M(lbu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_ld] = {M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lddir] = {M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD},
+ [insn_ldpte] = {M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD},
+ [insn_ldx] = {M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD},
+ [insn_lh] = {M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lhu] = {M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#ifndef CONFIG_CPU_MIPSR6
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ [insn_ll] = {M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lld] = {M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
- { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 },
- { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 },
+ [insn_ll] = {M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9},
+ [insn_lld] = {M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9},
#endif
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
- { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
- { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mfhc0, M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD },
- { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
- { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mthi, M(spec_op, 0, 0, 0, 0, mthi_op), RS },
- { insn_mtlo, M(spec_op, 0, 0, 0, 0, mtlo_op), RS },
+ [insn_lui] = {M(lui_op, 0, 0, 0, 0, 0), RT | SIMM},
+ [insn_lw] = {M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lwu] = {M(lwu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lwx] = {M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD},
+ [insn_mfc0] = {M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD},
+ [insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD},
+ [insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD},
+ [insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD},
+ [insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS},
+ [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
#ifndef CONFIG_CPU_MIPSR6
- { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
+ [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
#else
- { insn_mul, M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
+ [insn_mul] = {M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
#endif
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
+ [insn_multu] = {M(spec_op, 0, 0, 0, 0, multu_op), RS | RT},
+ [insn_nor] = {M(spec_op, 0, 0, 0, 0, nor_op), RS | RT | RD},
+ [insn_or] = {M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD},
+ [insn_ori] = {M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
#ifndef CONFIG_CPU_MIPSR6
- { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ [insn_pref] = {M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
- { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 },
+ [insn_pref] = {M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9},
#endif
- { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
- { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
+ [insn_rfe] = {M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0},
+ [insn_rotr] = {M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE},
+ [insn_sb] = {M(sb_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#ifndef CONFIG_CPU_MIPSR6
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ [insn_sc] = {M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_scd] = {M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
- { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 },
- { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 },
+ [insn_sc] = {M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9},
+ [insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9},
#endif
- { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
- { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
- { insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD },
- { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
- { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
- { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_srlv, M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
- { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE },
- { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
- { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
- { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
- { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
- { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
- { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM },
- { insn_wsbh, M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD },
- { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
- { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
- { insn_ldpte, M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD },
- { insn_lddir, M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD },
- { insn_invalid, 0, 0 }
+ [insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE},
+ [insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
+ [insn_slt] = {M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD},
+ [insn_slti] = {M(slti_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD},
+ [insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE},
+ [insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE},
+ [insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD},
+ [insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD},
+ [insn_sw] = {M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sync] = {M(spec_op, 0, 0, 0, 0, sync_op), RE},
+ [insn_syscall] = {M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+ [insn_tlbp] = {M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0},
+ [insn_tlbr] = {M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0},
+ [insn_tlbwi] = {M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0},
+ [insn_tlbwr] = {M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0},
+ [insn_wait] = {M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM},
+ [insn_wsbh] = {M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD},
+ [insn_xor] = {M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD},
+ [insn_xori] = {M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
+ [insn_yield] = {M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD},
};
#undef M
@@ -196,20 +215,17 @@ static inline u32 build_jimm(u32 arg)
*/
static void build_insn(u32 **buf, enum opcode opc, ...)
{
- struct insn *ip = NULL;
- unsigned int i;
+ const struct insn *ip;
va_list ap;
u32 op;
- for (i = 0; insn_table[i].opcode != insn_invalid; i++)
- if (insn_table[i].opcode == opc) {
- ip = &insn_table[i];
- break;
- }
-
- if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+ if (opc < 0 || opc >= insn_invalid ||
+ (opc == insn_daddiu && r4k_daddiu_bug()) ||
+ (insn_table[opc].match == 0 && insn_table[opc].fields == 0))
panic("Unsupported Micro-assembler instruction %d", opc);
+ ip = &insn_table[opc];
+
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS)
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 730363b59bac..57570c0649b4 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -46,26 +46,29 @@ enum fields {
#define SIMM9_MASK 0x1ff
enum opcode {
- insn_invalid,
insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
- insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
- insn_bne, insn_cache, insn_cfc1, insn_cfcmsa, insn_ctc1, insn_ctcmsa,
- insn_daddiu, insn_daddu, insn_di, insn_dins, insn_dinsm, insn_divu,
- insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
- insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
- insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
- insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
- insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0,
- insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_or, insn_ori,
- insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
- insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
+ insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez,
+ insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1,
+ insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu,
+ insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0,
+ insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd,
+ insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav,
+ insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext,
+ insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu,
+ insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu,
+ insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0,
+ insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0,
+ insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor,
+ insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb,
+ insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv,
+ insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srl,
insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
- insn_xori, insn_yield, insn_lddir, insn_ldpte, insn_lhu,
+ insn_xori, insn_yield,
+ insn_invalid /* insn_invalid must be last */
};
struct insn {
- enum opcode opcode;
u32 match;
enum fields fields;
};
@@ -215,6 +218,13 @@ Ip_u2u1msbu3(op) \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
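+/*
+ * For instructions such as DINSU whose msb and lsb fields are both
+ * encoded relative to bit 32 of the destination register.
+ */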
+#define I_u2u1msb32msb3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c+d-33, c-32); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
#define I_u2u1msbdu3(op) \
Ip_u2u1msbu3(op) \
{ \
@@ -265,25 +275,36 @@ I_u1u2s3(_beq)
I_u1u2s3(_beql)
I_u1s2(_bgez)
I_u1s2(_bgezl)
+I_u1s2(_bgtz)
+I_u1s2(_blez)
I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
+I_u1(_break)
I_u2s3u1(_cache)
I_u1u2(_cfc1)
I_u2u1(_cfcmsa)
I_u1u2(_ctc1)
I_u2u1(_ctcmsa)
+I_u1u2(_ddivu)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
+I_u1u2(_dmultu)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
I_u1(_di);
I_u1u2(_divu)
+I_u2u1(_dsbh);
+I_u2u1(_dshd);
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
+I_u3u2u1(_dsllv)
I_u2u1u3(_dsra)
+I_u2u1u3(_dsra32)
+I_u3u2u1(_dsrav)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
+I_u3u2u1(_dsrlv)
I_u2u1u3(_drotr)
I_u2u1u3(_drotr32)
I_u3u1u2(_dsubu)
@@ -295,6 +316,7 @@ I_u1(_jal)
I_u2u1(_jalr)
I_u1(_jr)
I_u2s3u1(_lb)
+I_u2s3u1(_lbu)
I_u2s3u1(_ld)
I_u2s3u1(_lh)
I_u2s3u1(_lhu)
@@ -302,8 +324,11 @@ I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
I_u2s3u1(_lw)
+I_u2s3u1(_lwu)
I_u1u2u3(_mfc0)
I_u1u2u3(_mfhc0)
+I_u3u1u2(_movn)
+I_u3u1u2(_movz)
I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
@@ -311,15 +336,20 @@ I_u1u2u3(_mthc0)
I_u1(_mthi)
I_u1(_mtlo)
I_u3u1u2(_mul)
-I_u2u1u3(_ori)
+I_u1u2(_multu)
+I_u3u1u2(_nor)
I_u3u1u2(_or)
+I_u2u1u3(_ori)
I_0(_rfe)
+I_u2s3u1(_sb)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
I_u2s3u1(_sd)
+I_u2s3u1(_sh)
I_u2u1u3(_sll)
I_u3u2u1(_sllv)
I_s3s1s2(_slt)
+I_u2u1s3(_slti)
I_u2u1s3(_sltiu)
I_u3u1u2(_sltu)
I_u2u1u3(_sra)
@@ -340,6 +370,7 @@ I_u2u1u3(_xori)
I_u2u1(_yield)
I_u2u1msbu3(_dins);
I_u2u1msb32u3(_dinsm);
+I_u2u1msb32msb3(_dinsu);
I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
index 8c2771401f54..47d678416715 100644
--- a/arch/mips/net/Makefile
+++ b/arch/mips/net/Makefile
@@ -1,3 +1,4 @@
# MIPS networking code
-obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_asm.o
+obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
+obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
index ce89c9e294f9..974276e828b2 100644
--- a/arch/mips/vdso/gettimeofday.c
+++ b/arch/mips/vdso/gettimeofday.c
@@ -20,6 +20,46 @@
#include <asm/unistd.h>
#include <asm/vdso.h>
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
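+/*
+ * Syscall fallbacks for when the vDSO fast path cannot be used. Per
+ * the MIPS convention, a3 is non-zero on error and v0 then holds the
+ * positive errno, hence the negation below.
+ */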
+static __always_inline long gettimeofday_fallback(struct timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("a1") = _tz;
+ register struct timeval *tv asm("a0") = _tv;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_gettimeofday;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return error ? -ret : ret;
+}
+
+#endif
+
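+/* Likewise for clock_gettime(), used whenever the fast path fails */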
+static __always_inline long clock_gettime_fallback(clockid_t _clkid,
+ struct timespec *_ts)
+{
+ register struct timespec *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_clock_gettime;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return error ? -ret : ret;
+}
+
static __always_inline int do_realtime_coarse(struct timespec *ts,
const union mips_vdso_data *data)
{
@@ -39,8 +79,8 @@ static __always_inline int do_monotonic_coarse(struct timespec *ts,
const union mips_vdso_data *data)
{
u32 start_seq;
- u32 to_mono_sec;
- u32 to_mono_nsec;
+ u64 to_mono_sec;
+ u64 to_mono_nsec;
do {
start_seq = vdso_data_read_begin(data);
@@ -148,8 +188,8 @@ static __always_inline int do_monotonic(struct timespec *ts,
{
u32 start_seq;
u64 ns;
- u32 to_mono_sec;
- u32 to_mono_nsec;
+ u64 to_mono_sec;
+ u64 to_mono_nsec;
do {
start_seq = vdso_data_read_begin(data);
@@ -187,7 +227,7 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
ret = do_realtime(&ts, data);
if (ret)
- return ret;
+ return gettimeofday_fallback(tv, tz);
if (tv) {
tv->tv_sec = ts.tv_sec;
@@ -202,12 +242,12 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
return 0;
}
-#endif /* CONFIG_CLKSRC_MIPS_GIC */
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
{
const union mips_vdso_data *data = get_vdso_data();
- int ret;
+ int ret = -1;
switch (clkid) {
case CLOCK_REALTIME_COARSE:
@@ -223,10 +263,11 @@ int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
ret = do_monotonic(ts, data);
break;
default:
- ret = -ENOSYS;
break;
}
- /* If we return -ENOSYS libc should fall back to a syscall. */
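+ /* Unknown clock or failed fast path: make the real syscall */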
+ if (ret)
+ ret = clock_gettime_fallback(clkid, ts);
+
return ret;
}
diff --git a/arch/mn10300/include/asm/nmi.h b/arch/mn10300/include/asm/nmi.h
index f3671cbbc117..b05627597b1b 100644
--- a/arch/mn10300/include/asm/nmi.h
+++ b/arch/mn10300/include/asm/nmi.h
@@ -11,4 +11,6 @@
#ifndef _ASM_NMI_H
#define _ASM_NMI_H
+extern void arch_touch_nmi_watchdog(void);
+
#endif /* _ASM_NMI_H */
diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S
index f2f5c9cfaabd..34f8773de7d0 100644
--- a/arch/mn10300/kernel/mn10300-watchdog-low.S
+++ b/arch/mn10300/kernel/mn10300-watchdog-low.S
@@ -50,9 +50,9 @@ watchdog_handler:
# we can't inline it)
#
###############################################################################
- .globl touch_nmi_watchdog
- .type touch_nmi_watchdog,@function
-touch_nmi_watchdog:
+ .globl arch_touch_nmi_watchdog
+ .type arch_touch_nmi_watchdog,@function
+arch_touch_nmi_watchdog:
clr d0
clr d1
mov watchdog_alert_counter, a0
@@ -63,4 +63,4 @@ touch_nmi_watchdog:
lne
ret [],0
- .size touch_nmi_watchdog,.-touch_nmi_watchdog
+ .size arch_touch_nmi_watchdog,.-arch_touch_nmi_watchdog
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c
index a2d8e6938d67..0d5641beadf5 100644
--- a/arch/mn10300/kernel/mn10300-watchdog.c
+++ b/arch/mn10300/kernel/mn10300-watchdog.c
@@ -31,7 +31,7 @@ static unsigned int watchdog;
static unsigned int watchdog_hz = 1;
unsigned int watchdog_alert_counter[NR_CPUS];
-EXPORT_SYMBOL(touch_nmi_watchdog);
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
/*
* the best way to detect whether a CPU has a 'hard lockup' problem
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index e1a843def56f..896c26ae0da9 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -1,8 +1,6 @@
generic-y += atomic.h
-generic-y += auxvec.h
generic-y += barrier.h
generic-y += bitops.h
-generic-y += bitsperlong.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
@@ -12,55 +10,33 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += module.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += sections.h
generic-y += segment.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
generic-y += spinlock.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/nios2/include/asm/signal.h b/arch/nios2/include/asm/signal.h
deleted file mode 100644
index bbcf11eecb01..000000000000
--- a/arch/nios2/include/asm/signal.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright Altera Corporation (C) 2013. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-#ifndef _NIOS2_SIGNAL_H
-#define _NIOS2_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-
-#endif /* _NIOS2_SIGNAL_H */
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 51eff5bc2eb4..ffca24da7647 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,6 +1,29 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
generic-y += setup.h
+generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
generic-y += ucontext.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 091585addb91..5bea416a7792 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,6 +1,4 @@
-generic-y += auxvec.h
generic-y += barrier.h
-generic-y += bitsperlong.h
generic-y += bug.h
generic-y += bugs.h
generic-y += cacheflush.h
@@ -11,57 +9,32 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += module.h
-generic-y += msgbuf.h
generic-y += pci.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += sections.h
generic-y += segment.h
-generic-y += sembuf.h
-generic-y += setup.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
generic-y += string.h
-generic-y += swab.h
generic-y += switch_to.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index b55fc2ae1e8c..62286dbeb904 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,4 +1,31 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index a9909c2d04c5..a41139575ab4 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,5 +1,3 @@
-
-generic-y += auxvec.h
generic-y += barrier.h
generic-y += clkdev.h
generic-y += current.h
@@ -11,14 +9,12 @@ generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += param.h
generic-y += percpu.h
-generic-y += poll.h
generic-y += preempt.h
generic-y += seccomp.h
generic-y += segment.h
@@ -28,4 +24,3 @@ generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 1fd962a07f52..cab33a0d0e82 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -6,7 +6,6 @@
*/
#include <asm/page.h>
#include <asm/cache.h>
-#include <asm-generic/uaccess-unaligned.h>
#include <linux/bug.h>
#include <linux/string.h>
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 3971c60a7e7f..196d2a4efb31 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -1,4 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += kvm_para.h
+generic-y += param.h
+generic-y += poll.h
generic-y += resource.h
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7177a3f4f418..36f858c37ca7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -82,7 +82,7 @@ config NR_IRQS
config NMI_IPI
bool
- depends on SMP && (DEBUGGER || KEXEC_CORE)
+ depends on SMP && (DEBUGGER || KEXEC_CORE || HARDLOCKUP_DETECTOR)
default y
config STACKTRACE_SUPPORT
@@ -125,6 +125,7 @@ config PPC
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
@@ -192,11 +193,13 @@ config PPC
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_NMI if PERF_EVENTS
+ select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
+ select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 2b90335194a7..a2cc8010cd72 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -560,7 +560,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
long t1, t2;
@@ -579,7 +579,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer", "memory");
- return t1;
+ return t1 != 0;
}
#endif /* __powerpc64__ */
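The return type change above is worth a second look: the function computes a 64-bit t1, and returning it through a truncating conversion can turn a non-zero counter into a false result whenever its low 32 bits happen to be zero. A minimal user-space sketch of the hazard (assuming 64-bit long; the values are illustrative, not from the patch):

/* Why "return t1 != 0" is safer than returning the raw 64-bit value. */
#include <stdio.h>

int main(void)
{
	long t1 = 0x100000000L;		/* non-zero counter value */
	int truncated = (int)t1;	/* old-style truncation: 0, wrong */
	int fixed = (t1 != 0);		/* new behaviour: 1, correct */

	printf("truncated=%d fixed=%d\n", truncated, fixed);
	return 0;
}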
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 20b1485ff1e8..e2329db9d6f4 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
struct page *page;
- page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT),
+ page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
4);
if (!page)
return NULL;
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index ff1ccb375e60..6f8e79cd35d8 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -1,4 +1,15 @@
#ifndef _ASM_NMI_H
#define _ASM_NMI_H
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+extern void arch_touch_nmi_watchdog(void);
+
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+
+#else
+static inline void arch_touch_nmi_watchdog(void) {}
+#endif
+
#endif /* _ASM_NMI_H */
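For context, arch_touch_nmi_watchdog() is the arch half of a split introduced by this series: the generic touch_nmi_watchdog() wrapper in include/linux/nmi.h calls the arch hook and then pets the soft-lockup detector. A rough sketch of the expected wrapper shape, with the config guards elided (treat the exact form as an assumption):

/* Assumed shape of the generic wrapper in include/linux/nmi.h. */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();	/* arch hook, or an empty stub */
	touch_softlockup_watchdog();	/* now handled generically */
}

This split is also why the sparc hunk later in this patch can drop its own touch_softlockup_watchdog() call.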
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index ef930ba500f9..3130a73652c7 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -876,6 +876,15 @@ struct OpalIoPhb4ErrorData {
enum {
OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
+
+ /* These two define the base MMU mode of the host on P9
+ *
+	 * On P9 Nimbus DD2.0 and Cumulus (and later), KVM can still
+ * create hash guests in "radix" mode with care (full core
+ * switch only).
+ */
+ OPAL_REINIT_CPUS_MMU_HASH = (1 << 2),
+ OPAL_REINIT_CPUS_MMU_RADIX = (1 << 3),
};
typedef struct oppanel_line {
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7e50e47375d6..a3b6575c7842 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1303,7 +1303,7 @@ static inline void msr_check_and_clear(unsigned long bits)
" .llong 0\n" \
".previous" \
: "=r" (rval) \
- : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
rval;})
#else
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ebddb2111d87..8ea98504f900 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -55,6 +55,8 @@ struct smp_ops_t {
int (*cpu_bootable)(unsigned int nr);
};
+extern void smp_flush_nmi_ipi(u64 delay_us);
+extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4cf57f2126e6..9c0e60ca1666 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -90,9 +90,6 @@
#define __put_user_inatomic(x, ptr) \
__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
extern long __put_user_bad(void);
/*
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0845eebc5af3..4aa7c147e447 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
obj-$(CONFIG_VDSO32) += vdso32/
+obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 10cb2896b2ae..610955fe8b81 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -218,13 +218,20 @@ __init_tlb_power8:
ptesync
1: blr
+/*
+ * Flush the TLB in hash mode. Hash must flush with RIC=2 once for process
+ * and once for partition scope to clear process and partition table entries.
+ */
__init_tlb_power9:
- li r6,POWER9_TLB_SETS_HASH
+ li r6,POWER9_TLB_SETS_HASH - 1
mtctr r6
li r7,0xc00 /* IS field = 0b11 */
+ li r8,0
ptesync
-2: tlbiel r7
- addi r7,r7,0x1000
+ PPC_TLBIEL(7, 8, 2, 1, 0)
+ PPC_TLBIEL(7, 8, 2, 0, 0)
+2: addi r7,r7,0x1000
+ PPC_TLBIEL(7, 8, 0, 0, 0)
bdnz 2b
ptesync
1: blr
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 4c7656dc4e04..1df770e8cbe0 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -94,9 +94,6 @@ static void (*init_pmu_registers)(void);
static void cpufeatures_flush_tlb(void)
{
- unsigned long rb;
- unsigned int i, num_sets;
-
/*
* This is a temporary measure to keep equivalent TLB flush as the
* cputable based setup code.
@@ -105,24 +102,15 @@ static void cpufeatures_flush_tlb(void)
case PVR_POWER8:
case PVR_POWER8E:
case PVR_POWER8NVL:
- num_sets = POWER8_TLB_SETS;
+ __flush_tlb_power8(POWER8_TLB_SETS);
break;
case PVR_POWER9:
- num_sets = POWER9_TLB_SETS_HASH;
+ __flush_tlb_power9(POWER9_TLB_SETS_HASH);
break;
default:
- num_sets = 1;
pr_err("unknown CPU version for boot TLB flush\n");
break;
}
-
- asm volatile("ptesync" : : : "memory");
- rb = TLBIEL_INVAL_SET;
- for (i = 0; i < num_sets; i++) {
- asm volatile("tlbiel %0" : : "r" (rb));
- rb += 1 << TLBIEL_INVAL_SET_SHIFT;
- }
- asm volatile("ptesync" : : : "memory");
}
static void __restore_cpu_cpufeatures(void)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4c18a5fbb4bb..e6d8354d79ef 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1314,6 +1314,31 @@ EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH)
+
+#define MASKED_DEC_HANDLER_LABEL 3f
+
+#define MASKED_DEC_HANDLER(_H) \
+3: /* soft-nmi */ \
+ std r12,PACA_EXGEN+EX_R12(r13); \
+ GET_SCRATCH0(r10); \
+ std r10,PACA_EXGEN+EX_R13(r13); \
+ EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H)
+
+EXC_COMMON_BEGIN(soft_nmi_common)
+ mr r10,r1
+	ld	r1,PACAEMERGSP(r13)
+ subi r1,r1,INT_FRAME_SIZE
+ EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
+ system_reset, soft_nmi_interrupt,
+ ADD_NVGPRS;ADD_RECONCILE)
+ b ret_from_except
+
+#else
+#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
+#define MASKED_DEC_HANDLER(_H)
+#endif
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -1336,7 +1361,7 @@ masked_##_H##interrupt: \
lis r10,0x7fff; \
ori r10,r10,0xffff; \
mtspr SPRN_DEC,r10; \
- b 2f; \
+ b MASKED_DEC_HANDLER_LABEL; \
1: cmpwi r10,PACA_IRQ_DBELL; \
beq 2f; \
cmpwi r10,PACA_IRQ_HMI; \
@@ -1351,7 +1376,8 @@ masked_##_H##interrupt: \
ld r11,PACA_EXGEN+EX_R11(r13); \
GET_SCRATCH0(r13); \
##_H##rfid; \
- b .
+ b .; \
+ MASKED_DEC_HANDLER(_H)
/*
* Real mode exceptions actually use this too, but alternate
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 3079518f2245..dc0c49cfd90a 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -999,8 +999,7 @@ static int fadump_create_elfcore_headers(char *bufp)
phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
phdr->p_offset = phdr->p_paddr;
- phdr->p_memsz = vmcoreinfo_max_size;
- phdr->p_filesz = vmcoreinfo_max_size;
+ phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
/* Increment number of program headers. */
(elf->e_phnum)++;
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 9ad37f827a97..1086ea37c832 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -25,6 +25,7 @@
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/nmi.h> /* hardlockup_detector_disable() */
#include <asm/reg.h>
#include <asm/sections.h>
@@ -718,6 +719,12 @@ static __init void kvm_free_tmp(void)
static int __init kvm_guest_init(void)
{
+ /*
+ * The hardlockup detector is likely to get false positives in
+ * KVM guests, so disable it by default.
+ */
+ hardlockup_detector_disable();
+
if (!kvm_para_available())
goto free_tmp;
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index d24e689e893f..b76ca198e09c 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -53,6 +53,60 @@ static void flush_tlb_206(unsigned int num_sets, unsigned int action)
asm volatile("ptesync" : : : "memory");
}
+static void flush_tlb_300(unsigned int num_sets, unsigned int action)
+{
+ unsigned long rb;
+ unsigned int i;
+ unsigned int r;
+
+ switch (action) {
+ case TLB_INVAL_SCOPE_GLOBAL:
+ rb = TLBIEL_INVAL_SET;
+ break;
+ case TLB_INVAL_SCOPE_LPID:
+ rb = TLBIEL_INVAL_SET_LPID;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+
+ if (early_radix_enabled())
+ r = 1;
+ else
+ r = 0;
+
+ /*
+ * First flush table/PWC caches with set 0, then flush the
+ * rest of the sets, partition scope. Radix must then do it
+ * all again with process scope. Hash just has to flush
+ * process table.
+ */
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+ "r"(rb), "r"(0), "i"(2), "i"(0), "r"(r));
+ for (i = 1; i < num_sets; i++) {
+ unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);
+
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+ "r"(rb+set), "r"(0), "i"(2), "i"(0), "r"(r));
+ }
+
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+ "r"(rb), "r"(0), "i"(2), "i"(1), "r"(r));
+ if (early_radix_enabled()) {
+ for (i = 1; i < num_sets; i++) {
+ unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);
+
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+ "r"(rb+set), "r"(0), "i"(2), "i"(1), "r"(r));
+ }
+ }
+
+ asm volatile("ptesync" : : : "memory");
+}
+
/*
* Generic routines to flush TLB on POWER processors. These routines
* are used as flush_tlb hook in the cpu_spec.
@@ -79,7 +133,7 @@ void __flush_tlb_power9(unsigned int action)
else
num_sets = POWER9_TLB_SETS_HASH;
- flush_tlb_206(num_sets, action);
+ flush_tlb_300(num_sets, action);
}
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c119044cad0d..8ac0bd2bddb0 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -614,6 +614,18 @@ _GLOBAL(kexec_sequence)
li r0,0
std r0,16(r1)
+BEGIN_FTR_SECTION
+ /*
+ * This is the best time to turn AMR/IAMR off.
+	 * Key 0 is used in radix for supervisor<->user
+	 * protection, but on hash key 0 is reserved;
+	 * ideally we want to enter with a clean state.
+	 * NOTE: we rely on r0 being 0 from above.
+ */
+ mtspr SPRN_IAMR,r0
+ mtspr SPRN_AMOR,r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
/* save regs for local vars on new stack.
* yes, we won't go back, but ...
*/
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index dd8a04f3053a..613f79f03877 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -15,6 +15,9 @@
#undef DEBUG_PROM
+/* we cannot use FORTIFY as it brings in new symbols */
+#define __NO_FORTIFY
+
#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4640f6d64f8b..af23d4b576ec 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -751,22 +751,3 @@ unsigned long memory_block_size_bytes(void)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
- return ppc_proc_freq * watchdog_thresh;
-}
-
-/*
- * The hardlockup detector breaks PMU event based branches and is likely
- * to get false positives in KVM guests, so disable it by default.
- */
-static int __init disable_hardlockup_detector(void)
-{
- hardlockup_detector_disable();
-
- return 0;
-}
-early_initcall(disable_hardlockup_detector);
-#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c6b8bace1766..997c88d54acf 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -435,13 +435,31 @@ static void do_smp_send_nmi_ipi(int cpu)
}
}
+void smp_flush_nmi_ipi(u64 delay_us)
+{
+ unsigned long flags;
+
+ nmi_ipi_lock_start(&flags);
+ while (nmi_ipi_busy_count) {
+ nmi_ipi_unlock_end(&flags);
+ udelay(1);
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+ return;
+ }
+ nmi_ipi_lock_start(&flags);
+ }
+ nmi_ipi_unlock_end(&flags);
+}
+
/*
* - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
* - fn is the target callback function.
* - delay_us > 0 is the delay before giving up waiting for targets to
* enter the handler, == 0 specifies indefinite delay.
*/
-static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
unsigned long flags;
int me = raw_smp_processor_id();
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
new file mode 100644
index 000000000000..b67f8b03a32d
--- /dev/null
+++ b/arch/powerpc/kernel/watchdog.c
@@ -0,0 +1,386 @@
+/*
+ * Watchdog support on powerpc systems.
+ *
+ * Copyright 2017, IBM Corporation.
+ *
+ * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
+ */
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kprobes.h>
+#include <linux/hardirq.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/kdebug.h>
+#include <linux/sched/debug.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+
+#include <asm/paca.h>
+
+/*
+ * The watchdog has a simple timer that runs on each CPU, once per timer
+ * period. This is the heartbeat.
+ *
+ * Then there are checks to see if the heartbeat has not triggered on a CPU
+ * for the panic timeout period. Currently the watchdog only supports an
+ * SMP check, so the heartbeat only turns on when we have 2 or more CPUs.
+ *
+ * This is not an NMI watchdog, but Linux uses that name for a generic
+ * watchdog in some cases, so NMI gets used in some places.
+ */
+
+static cpumask_t wd_cpus_enabled __read_mostly;
+
+static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
+static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
+
+static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeats */
+
+static DEFINE_PER_CPU(struct timer_list, wd_timer);
+static DEFINE_PER_CPU(u64, wd_timer_tb);
+
+/*
+ * These are for the SMP checker. Each CPU clears its bit in the
+ * pending mask from its heartbeat, once per timer period. When the
+ * mask becomes empty, the time is noted and the bits are refilled.
+ * If the time since the last all-clear exceeds the panic timeout,
+ * we can panic with the list of stuck CPUs.
+ *
+ * This will work best with NMI IPIs for crash code so the stuck CPUs
+ * can be pulled out to get their backtraces.
+ */
+static unsigned long __wd_smp_lock;
+static cpumask_t wd_smp_cpus_pending;
+static cpumask_t wd_smp_cpus_stuck;
+static u64 wd_smp_last_reset_tb;
+
+static inline void wd_smp_lock(unsigned long *flags)
+{
+ /*
+ * Avoid locking layers if possible.
+ * This may be called from low level interrupt handlers at some
+ * point in future.
+ */
+ local_irq_save(*flags);
+ while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
+ cpu_relax();
+}
+
+static inline void wd_smp_unlock(unsigned long *flags)
+{
+ clear_bit_unlock(0, &__wd_smp_lock);
+ local_irq_restore(*flags);
+}
+
+static void wd_lockup_ipi(struct pt_regs *regs)
+{
+ pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id());
+ print_modules();
+ print_irqtrace_events(current);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+
+ if (hardlockup_panic)
+ nmi_panic(regs, "Hard LOCKUP");
+}
+
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+ cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
+ cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+ if (cpumask_empty(&wd_smp_cpus_pending)) {
+ wd_smp_last_reset_tb = tb;
+ cpumask_andnot(&wd_smp_cpus_pending,
+ &wd_cpus_enabled,
+ &wd_smp_cpus_stuck);
+ }
+}
+
+static void watchdog_smp_panic(int cpu, u64 tb)
+{
+ unsigned long flags;
+ int c;
+
+ wd_smp_lock(&flags);
+ /* Double check some things under lock */
+ if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
+ goto out;
+ if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
+ goto out;
+ if (cpumask_weight(&wd_smp_cpus_pending) == 0)
+ goto out;
+
+ pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n",
+ cpu, cpumask_pr_args(&wd_smp_cpus_pending));
+
+ /*
+ * Try to trigger the stuck CPUs.
+ */
+ for_each_cpu(c, &wd_smp_cpus_pending) {
+ if (c == cpu)
+ continue;
+ smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
+ }
+ smp_flush_nmi_ipi(1000000);
+
+	/* Take the stuck CPUs out of the watch group */
+ for_each_cpu(c, &wd_smp_cpus_pending)
+ set_cpu_stuck(c, tb);
+
+out:
+ wd_smp_unlock(&flags);
+
+ printk_safe_flush();
+ /*
+ * printk_safe_flush() seems to require another print
+ * before anything actually goes out to console.
+ */
+ if (sysctl_hardlockup_all_cpu_backtrace)
+ trigger_allbutself_cpu_backtrace();
+
+ if (hardlockup_panic)
+ nmi_panic(NULL, "Hard LOCKUP");
+}
+
+static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
+{
+ if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
+ if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
+ unsigned long flags;
+
+ pr_emerg("Watchdog CPU:%d became unstuck\n", cpu);
+ wd_smp_lock(&flags);
+ cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
+ wd_smp_unlock(&flags);
+ }
+ return;
+ }
+ cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+ if (cpumask_empty(&wd_smp_cpus_pending)) {
+ unsigned long flags;
+
+ wd_smp_lock(&flags);
+ if (cpumask_empty(&wd_smp_cpus_pending)) {
+ wd_smp_last_reset_tb = tb;
+ cpumask_andnot(&wd_smp_cpus_pending,
+ &wd_cpus_enabled,
+ &wd_smp_cpus_stuck);
+ }
+ wd_smp_unlock(&flags);
+ }
+}
+
+static void watchdog_timer_interrupt(int cpu)
+{
+ u64 tb = get_tb();
+
+ per_cpu(wd_timer_tb, cpu) = tb;
+
+ wd_smp_clear_cpu_pending(cpu, tb);
+
+ if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
+ watchdog_smp_panic(cpu, tb);
+}
+
+void soft_nmi_interrupt(struct pt_regs *regs)
+{
+ unsigned long flags;
+ int cpu = raw_smp_processor_id();
+ u64 tb;
+
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+ return;
+
+ nmi_enter();
+ tb = get_tb();
+ if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
+ per_cpu(wd_timer_tb, cpu) = tb;
+
+ wd_smp_lock(&flags);
+ if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
+ wd_smp_unlock(&flags);
+ goto out;
+ }
+ set_cpu_stuck(cpu, tb);
+
+ pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu);
+ print_modules();
+ print_irqtrace_events(current);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+
+ wd_smp_unlock(&flags);
+
+ if (sysctl_hardlockup_all_cpu_backtrace)
+ trigger_allbutself_cpu_backtrace();
+
+ if (hardlockup_panic)
+ nmi_panic(regs, "Hard LOCKUP");
+ }
+ if (wd_panic_timeout_tb < 0x7fffffff)
+ mtspr(SPRN_DEC, wd_panic_timeout_tb);
+
+out:
+ nmi_exit();
+}
+
+static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
+{
+ t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
+ if (wd_timer_period_ms > 1000)
+ t->expires = __round_jiffies_up(t->expires, cpu);
+ add_timer_on(t, cpu);
+}
+
+static void wd_timer_fn(unsigned long data)
+{
+ struct timer_list *t = this_cpu_ptr(&wd_timer);
+ int cpu = smp_processor_id();
+
+ watchdog_timer_interrupt(cpu);
+
+ wd_timer_reset(cpu, t);
+}
+
+void arch_touch_nmi_watchdog(void)
+{
+ int cpu = smp_processor_id();
+
+ watchdog_timer_interrupt(cpu);
+}
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+
+static void start_watchdog_timer_on(unsigned int cpu)
+{
+ struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+
+ per_cpu(wd_timer_tb, cpu) = get_tb();
+
+ setup_pinned_timer(t, wd_timer_fn, 0);
+ wd_timer_reset(cpu, t);
+}
+
+static void stop_watchdog_timer_on(unsigned int cpu)
+{
+ struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+
+ del_timer_sync(t);
+}
+
+static int start_wd_on_cpu(unsigned int cpu)
+{
+ if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ return 0;
+
+ if (watchdog_suspended)
+ return 0;
+
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+ return 0;
+
+ cpumask_set_cpu(cpu, &wd_cpus_enabled);
+ if (cpumask_weight(&wd_cpus_enabled) == 1) {
+ cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
+ wd_smp_last_reset_tb = get_tb();
+ }
+ smp_wmb();
+ start_watchdog_timer_on(cpu);
+
+ return 0;
+}
+
+static int stop_wd_on_cpu(unsigned int cpu)
+{
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+ return 0; /* Can happen in CPU unplug case */
+
+ stop_watchdog_timer_on(cpu);
+
+ cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_clear_cpu_pending(cpu, get_tb());
+
+ return 0;
+}
+
+static void watchdog_calc_timeouts(void)
+{
+ wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;
+
+ /* Have the SMP detector trigger a bit later */
+ wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;
+
+ /* 2/5 is the factor that the perf based detector uses */
+ wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
+}
+
+void watchdog_nmi_reconfigure(void)
+{
+ int cpu;
+
+ watchdog_calc_timeouts();
+
+ for_each_cpu(cpu, &wd_cpus_enabled)
+ stop_wd_on_cpu(cpu);
+
+ for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
+ start_wd_on_cpu(cpu);
+}
+
+/*
+ * This runs after lockup_detector_init() which sets up watchdog_cpumask.
+ */
+static int __init powerpc_watchdog_init(void)
+{
+ int err;
+
+ watchdog_calc_timeouts();
+
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
+ start_wd_on_cpu, stop_wd_on_cpu);
+ if (err < 0)
+ pr_warn("Watchdog could not be initialized");
+
+ return 0;
+}
+arch_initcall(powerpc_watchdog_init);
+
+static void handle_backtrace_ipi(struct pt_regs *regs)
+{
+ nmi_cpu_backtrace(regs);
+}
+
+static void raise_backtrace_ipi(cpumask_t *mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask) {
+ if (cpu == smp_processor_id())
+ handle_backtrace_ipi(NULL);
+ else
+ smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000);
+ }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
+}
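The timeout arithmetic in watchdog_calc_timeouts() is easy to sanity-check by hand. A user-space sketch with the default watchdog_thresh of 10 seconds and an assumed 512 MHz timebase (ppc_tb_freq is machine-specific): heartbeats run every 4 seconds, a CPU trips the per-CPU check after 10 seconds of silence, and the SMP checker panics after 15.

/* Worked example of the three timeout values; inputs are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long watchdog_thresh = 10;		/* seconds (default) */
	unsigned long ppc_tb_freq = 512000000UL;	/* ticks per second */

	unsigned long panic_tb = watchdog_thresh * ppc_tb_freq;	  /* 10s of ticks */
	unsigned long smp_panic_tb = panic_tb * 3 / 2;		  /* 15s of ticks */
	unsigned long period_ms = watchdog_thresh * 1000 * 2 / 5; /* 4000 ms */

	printf("panic=%lu smp_panic=%lu period_ms=%lu\n",
	       panic_tb, smp_panic_tb, period_ms);
	return 0;
}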
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 710e491206ed..8cb0190e2a73 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -93,7 +93,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
}
if (!hpt)
- hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
|__GFP_NOWARN, order - PAGE_SHIFT);
if (!hpt)
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index f3917705c686..41cf5ae273cf 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -233,192 +233,192 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p)
static void test_basic_patching(void)
{
- extern unsigned int ftr_fixup_test1;
- extern unsigned int end_ftr_fixup_test1;
- extern unsigned int ftr_fixup_test1_orig;
- extern unsigned int ftr_fixup_test1_expected;
- int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;
+ extern unsigned int ftr_fixup_test1[];
+ extern unsigned int end_ftr_fixup_test1[];
+ extern unsigned int ftr_fixup_test1_orig[];
+ extern unsigned int ftr_fixup_test1_expected[];
+ int size = end_ftr_fixup_test1 - ftr_fixup_test1;
fixup.value = fixup.mask = 8;
- fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
- fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
fixup.alt_start_off = fixup.alt_end_off = 0;
/* Sanity check */
- check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
+ check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
/* Check we don't patch if the value matches */
patch_feature_section(8, &fixup);
- check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
+ check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
/* Check we do patch if the value doesn't match */
patch_feature_section(0, &fixup);
- check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
+ check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
/* Check we do patch if the mask doesn't match */
- memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
- check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
+ memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
+ check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
patch_feature_section(~8, &fixup);
- check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
+ check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
static void test_alternative_patching(void)
{
- extern unsigned int ftr_fixup_test2;
- extern unsigned int end_ftr_fixup_test2;
- extern unsigned int ftr_fixup_test2_orig;
- extern unsigned int ftr_fixup_test2_alt;
- extern unsigned int ftr_fixup_test2_expected;
- int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;
+ extern unsigned int ftr_fixup_test2[];
+ extern unsigned int end_ftr_fixup_test2[];
+ extern unsigned int ftr_fixup_test2_orig[];
+ extern unsigned int ftr_fixup_test2_alt[];
+ extern unsigned int ftr_fixup_test2_expected[];
+ int size = end_ftr_fixup_test2 - ftr_fixup_test2;
fixup.value = fixup.mask = 0xF;
- fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
- fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
- fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
- fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);
/* Sanity check */
- check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
+ check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
/* Check we don't patch if the value matches */
patch_feature_section(0xF, &fixup);
- check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
+ check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
/* Check we do patch if the value doesn't match */
patch_feature_section(0, &fixup);
- check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
+ check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
/* Check we do patch if the mask doesn't match */
- memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
- check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
+ memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
+ check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
patch_feature_section(~0xF, &fixup);
- check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
+ check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
static void test_alternative_case_too_big(void)
{
- extern unsigned int ftr_fixup_test3;
- extern unsigned int end_ftr_fixup_test3;
- extern unsigned int ftr_fixup_test3_orig;
- extern unsigned int ftr_fixup_test3_alt;
- int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;
+ extern unsigned int ftr_fixup_test3[];
+ extern unsigned int end_ftr_fixup_test3[];
+ extern unsigned int ftr_fixup_test3_orig[];
+ extern unsigned int ftr_fixup_test3_alt[];
+ int size = end_ftr_fixup_test3 - ftr_fixup_test3;
fixup.value = fixup.mask = 0xC;
- fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
- fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
- fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
- fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);
/* Sanity check */
- check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+ check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
/* Expect nothing to be patched, and the error returned to us */
check(patch_feature_section(0xF, &fixup) == 1);
- check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+ check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
check(patch_feature_section(0, &fixup) == 1);
- check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+ check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
check(patch_feature_section(~0xF, &fixup) == 1);
- check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+ check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
static void test_alternative_case_too_small(void)
{
- extern unsigned int ftr_fixup_test4;
- extern unsigned int end_ftr_fixup_test4;
- extern unsigned int ftr_fixup_test4_orig;
- extern unsigned int ftr_fixup_test4_alt;
- extern unsigned int ftr_fixup_test4_expected;
- int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
+ extern unsigned int ftr_fixup_test4[];
+ extern unsigned int end_ftr_fixup_test4[];
+ extern unsigned int ftr_fixup_test4_orig[];
+ extern unsigned int ftr_fixup_test4_alt[];
+ extern unsigned int ftr_fixup_test4_expected[];
+ int size = end_ftr_fixup_test4 - ftr_fixup_test4;
unsigned long flag;
/* Check a high-bit flag */
flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
fixup.value = fixup.mask = flag;
- fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
- fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
- fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
- fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);
/* Sanity check */
- check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
+ check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
/* Check we don't patch if the value matches */
patch_feature_section(flag, &fixup);
- check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
+ check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
/* Check we do patch if the value doesn't match */
patch_feature_section(0, &fixup);
- check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
+ check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
/* Check we do patch if the mask doesn't match */
- memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
- check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
+ memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
+ check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
patch_feature_section(~flag, &fixup);
- check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
+ check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}
static void test_alternative_case_with_branch(void)
{
- extern unsigned int ftr_fixup_test5;
- extern unsigned int end_ftr_fixup_test5;
- extern unsigned int ftr_fixup_test5_expected;
- int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;
+ extern unsigned int ftr_fixup_test5[];
+ extern unsigned int end_ftr_fixup_test5[];
+ extern unsigned int ftr_fixup_test5_expected[];
+ int size = end_ftr_fixup_test5 - ftr_fixup_test5;
- check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
+ check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
static void test_alternative_case_with_external_branch(void)
{
- extern unsigned int ftr_fixup_test6;
- extern unsigned int end_ftr_fixup_test6;
- extern unsigned int ftr_fixup_test6_expected;
- int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;
+ extern unsigned int ftr_fixup_test6[];
+ extern unsigned int end_ftr_fixup_test6[];
+ extern unsigned int ftr_fixup_test6_expected[];
+ int size = end_ftr_fixup_test6 - ftr_fixup_test6;
- check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
+ check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
static void test_cpu_macros(void)
{
- extern u8 ftr_fixup_test_FTR_macros;
- extern u8 ftr_fixup_test_FTR_macros_expected;
- unsigned long size = &ftr_fixup_test_FTR_macros_expected -
- &ftr_fixup_test_FTR_macros;
+ extern u8 ftr_fixup_test_FTR_macros[];
+ extern u8 ftr_fixup_test_FTR_macros_expected[];
+ unsigned long size = ftr_fixup_test_FTR_macros_expected -
+ ftr_fixup_test_FTR_macros;
/* The fixups have already been done for us during boot */
- check(memcmp(&ftr_fixup_test_FTR_macros,
- &ftr_fixup_test_FTR_macros_expected, size) == 0);
+ check(memcmp(ftr_fixup_test_FTR_macros,
+ ftr_fixup_test_FTR_macros_expected, size) == 0);
}
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
- extern u8 ftr_fixup_test_FW_FTR_macros;
- extern u8 ftr_fixup_test_FW_FTR_macros_expected;
- unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
- &ftr_fixup_test_FW_FTR_macros;
+ extern u8 ftr_fixup_test_FW_FTR_macros[];
+ extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
+ unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
+ ftr_fixup_test_FW_FTR_macros;
/* The fixups have already been done for us during boot */
- check(memcmp(&ftr_fixup_test_FW_FTR_macros,
- &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
+ check(memcmp(ftr_fixup_test_FW_FTR_macros,
+ ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}
static void test_lwsync_macros(void)
{
- extern u8 lwsync_fixup_test;
- extern u8 end_lwsync_fixup_test;
- extern u8 lwsync_fixup_test_expected_LWSYNC;
- extern u8 lwsync_fixup_test_expected_SYNC;
- unsigned long size = &end_lwsync_fixup_test -
- &lwsync_fixup_test;
+ extern u8 lwsync_fixup_test[];
+ extern u8 end_lwsync_fixup_test[];
+ extern u8 lwsync_fixup_test_expected_LWSYNC[];
+ extern u8 lwsync_fixup_test_expected_SYNC[];
+ unsigned long size = end_lwsync_fixup_test -
+ lwsync_fixup_test;
/* The fixups have already been done for us during boot */
if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
- check(memcmp(&lwsync_fixup_test,
- &lwsync_fixup_test_expected_LWSYNC, size) == 0);
+ check(memcmp(lwsync_fixup_test,
+ lwsync_fixup_test_expected_LWSYNC, size) == 0);
} else {
- check(memcmp(&lwsync_fixup_test,
- &lwsync_fixup_test_expected_SYNC, size) == 0);
+ check(memcmp(lwsync_fixup_test,
+ lwsync_fixup_test_expected_SYNC, size) == 0);
}
}
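The pattern throughout this hunk, declaring linker-provided markers as incomplete arrays rather than single objects, keeps the pointer arithmetic inside the C object model: with extern unsigned int sym; an expression like &sym + 2 points past the one object the compiler knows about, which newer GCC is entitled to flag or exploit. A compile-only sketch of the fixed idiom (the symbols themselves come from assembly, so this does not link on its own):

/* Array declarations make "sym + n" ordinary pointer arithmetic. */
extern unsigned int ftr_fixup_test1[];		/* provided by asm */
extern unsigned int end_ftr_fixup_test1[];	/* provided by asm */

static inline int fixup_test1_size(void)
{
	return end_ftr_fixup_test1 - ftr_fixup_test1;	/* in words */
}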
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 33117f8a0882..ee33327686ae 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -683,8 +683,10 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
case 19:
switch ((instr >> 1) & 0x3ff) {
case 0: /* mcrf */
- rd = (instr >> 21) & 0x1c;
- ra = (instr >> 16) & 0x1c;
+ rd = 7 - ((instr >> 23) & 0x7);
+ ra = 7 - ((instr >> 18) & 0x7);
+ rd *= 4;
+ ra *= 4;
val = (regs->ccr >> ra) & 0xf;
regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
goto instr_done;
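The corrected mcrf decode can be verified numerically. A hedged user-space sketch, hand-encoding mcrf 1,2 with the field positions the patch uses (BF at instruction bits 23-25, BFA at 18-20): the old extraction produces a shift of 4, which indexes CR6's nibble, while the fixed one produces 24, CR1's nibble, since CR0 occupies the top nibble of the 32-bit CCR image.

/* Old vs. new BF field decode for mcrf; encoding is illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int instr = (19u << 26) | (1u << 23) | (2u << 18); /* mcrf 1,2 */

	int old_rd = (instr >> 21) & 0x1c;		/* buggy: 4 -> CR6 */
	int new_rd = (7 - ((instr >> 23) & 0x7)) * 4;	/* fixed: 24 -> CR1 */

	printf("old shift=%d new shift=%d\n", old_rd, new_rd);
	return 0;
}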
@@ -964,6 +966,19 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
#endif
case 19: /* mfcr */
+ if ((instr >> 20) & 1) {
+ imm = 0xf0000000UL;
+ for (sh = 0; sh < 8; ++sh) {
+ if (instr & (0x80000 >> sh)) {
+ regs->gpr[rd] = regs->ccr & imm;
+ break;
+ }
+ imm >>= 4;
+ }
+
+ goto instr_done;
+ }
+
regs->gpr[rd] = regs->ccr;
regs->gpr[rd] &= 0xffffffffUL;
goto instr_done;
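The new branch handles the mfocrf form, where a single FXM bit selects one 4-bit CR field and the rest of the result reads as zero. A small user-space model of that loop (field positions follow the patch; the CCR value is illustrative):

/* mfocrf in miniature: FXM=0x80 selects CR0, the top nibble. */
#include <stdio.h>

int main(void)
{
	unsigned long ccr = 0x28004321UL;
	unsigned int instr = (1u << 20) | (0x80u << 12); /* one-field bit + FXM */
	unsigned long imm = 0xf0000000UL;
	unsigned long gpr = 0;
	int sh;

	for (sh = 0; sh < 8; ++sh) {
		if (instr & (0x80000 >> sh)) {	/* FXM bit for field sh */
			gpr = ccr & imm;
			break;
		}
		imm >>= 4;
	}
	printf("gpr=%#lx\n", gpr);		/* 0x20000000: CR0 only */
	return 0;
}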
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 0ee6be4f1ba4..5d78b193fec4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -34,16 +34,9 @@
/*
* Top of mmap area (just below the process stack).
*
- * Leave at least a ~128 MB hole on 32bit applications.
- *
- * On 64bit applications we randomise the stack by 1GB so we need to
- * space our mmap start address by a further 1GB, otherwise there is a
- * chance the mmap area will end up closer to the stack than our ulimit
- * requires.
+ * Leave at least a ~128 MB hole.
*/
-#define MIN_GAP32 (128*1024*1024)
-#define MIN_GAP64 ((128 + 1024)*1024*1024UL)
-#define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
+#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)
static inline int mmap_is_legacy(void)
@@ -71,9 +64,26 @@ unsigned long arch_mmap_rnd(void)
return rnd << PAGE_SHIFT;
}
+static inline unsigned long stack_maxrandom_size(void)
+{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
+
+ /* 8MB for 32bit, 1GB for 64bit */
+ if (is_32bit_task())
+ return (1<<23);
+ else
+ return (1<<30);
+}
+
static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
+ unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
+
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+ gap += pad;
if (gap < MIN_GAP)
gap = MIN_GAP;
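The overflow guard reads tersely but is exact: unsigned arithmetic wraps rather than overflowing, so gap + pad exceeds gap only when the addition is safe. The same check in miniature (values are illustrative):

/* Saturating add for rlimits near RLIM_INFINITY. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long gap = ULONG_MAX - 100;	/* huge stack rlimit */
	unsigned long pad = (1UL << 30) + 4096;	/* randomisation + guard gap */

	if (gap + pad > gap)	/* false here: the add would wrap */
		gap += pad;

	printf("gap=%lu (left unchanged)\n", gap);
	return 0;
}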
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 71de2c6d88f3..abed1fe6992f 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -138,6 +138,14 @@ static int radix__init_new_context(struct mm_struct *mm)
rts_field = radix__get_tree_size();
process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
+ /*
+ * Order the above store with subsequent update of the PID
+ * register (at which point HW can start loading/caching
+ * the entry) and the corresponding load by the MMU from
+ * the L2 cache.
+ */
+ asm volatile("ptesync;isync" : : : "memory");
+
mm->context.npu_context = NULL;
return index;
@@ -223,9 +231,15 @@ void destroy_context(struct mm_struct *mm)
mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */
- if (radix_enabled())
- process_tb[mm->context.id].prtb1 = 0;
- else
+ if (radix_enabled()) {
+ /*
+ * Radix doesn't have a valid bit in the process table
+		 * entries. However, we know that at least the P9 implementation
+ * will avoid caching an entry with an invalid RTS field,
+ * and 0 is invalid. So this will do.
+ */
+ process_tb[mm->context.id].prtb0 = 0;
+ } else
subpage_prot_free(mm);
destroy_pagetable_page(mm);
__destroy_context(mm->context.id);
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 8125160be7bc..3f3aa9a7063a 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -90,13 +90,15 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
* MMCRA[SDAR_MODE] will be set to 0b01
* For rest
* MMCRA[SDAR_MODE] will be set from event code.
+ * If sdar_mode from event is zero, default to 0b01. Hardware
+ * requires that we set a non-zero value.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
- else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event))
*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
- else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ else
*mmcra |= MMCRA_SDAR_MODE_TLB;
} else
*mmcra |= MMCRA_SDAR_MODE_TLB;
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 80204e064362..50689180a6c1 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -51,8 +51,12 @@ EVENT(PM_DTLB_MISS, 0x300fc)
EVENT(PM_ITLB_MISS, 0x400fc)
/* Run_Instructions */
EVENT(PM_RUN_INST_CMPL, 0x500fa)
+/* Alternate event code for PM_RUN_INST_CMPL */
+EVENT(PM_RUN_INST_CMPL_ALT, 0x400fa)
/* Run_cycles */
EVENT(PM_RUN_CYC, 0x600f4)
+/* Alternate event code for Run_cycles */
+EVENT(PM_RUN_CYC_ALT, 0x200f4)
/* Instruction Dispatched */
EVENT(PM_INST_DISP, 0x200f2)
EVENT(PM_INST_DISP_ALT, 0x300f2)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index f17435e4a489..2280cf87ff9c 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -107,6 +107,8 @@ extern struct attribute_group isa207_pmu_format_group;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
{ PM_INST_DISP, PM_INST_DISP_ALT },
+ { PM_RUN_CYC_ALT, PM_RUN_CYC },
+ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index d8af9bc0489f..9558d725a99b 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -605,6 +605,24 @@ static const match_table_t spufs_tokens = {
{ Opt_err, NULL },
};
+static int spufs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
+ struct inode *inode = root->d_inode;
+
+ if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, inode->i_uid));
+ if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, inode->i_gid));
+ if ((inode->i_mode & S_IALLUGO) != 0775)
+ seq_printf(m, ",mode=%o", inode->i_mode);
+ if (sbi->debug)
+ seq_puts(m, ",debug");
+ return 0;
+}
+
static int
spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
{
@@ -724,11 +742,9 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
.destroy_inode = spufs_destroy_inode,
.statfs = simple_statfs,
.evict_inode = spufs_evict_inode,
- .show_options = generic_show_options,
+ .show_options = spufs_show_options,
};
- save_mount_options(sb, data);
-
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 59684b4af4d1..9b87abb178f0 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -59,6 +59,8 @@ static struct task_struct *kopald_tsk;
void opal_configure_cores(void)
{
+ u64 reinit_flags = 0;
+
/* Do the actual re-init, This will clobber all FPRs, VRs, etc...
*
* It will preserve non volatile GPRs and HSPRG0/1. It will
@@ -66,11 +68,24 @@ void opal_configure_cores(void)
* but it might clobber a bunch.
*/
#ifdef __BIG_ENDIAN__
- opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
+ reinit_flags |= OPAL_REINIT_CPUS_HILE_BE;
#else
- opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
+ reinit_flags |= OPAL_REINIT_CPUS_HILE_LE;
#endif
+ /*
+	 * POWER9 always supports running hash:
+	 * i.e. a hash host supports hash guests, and
+	 * a radix host supports hash/radix guests.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
+ if (early_radix_enabled())
+ reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
+ }
+
+ opal_reinit_cpus(reinit_flags);
+
/* Restore some bits */
if (cur_cpu_spec->cpu_restore)
cur_cpu_spec->cpu_restore();
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 2dc7e5fb86c3..897aa1400eb8 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -225,6 +225,8 @@ static void pnv_kexec_wait_secondaries_down(void)
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
+ u64 reinit_flags;
+
if (xive_enabled())
xive_kexec_teardown_cpu(secondary);
else
@@ -254,8 +256,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
* We might be running as little-endian - now that interrupts
* are disabled, reset the HILE bit to big-endian so we don't
* take interrupts in the wrong endian later
+ *
+ * We reinit to enable both radix and hash on P9 to ensure
+ * the mode used by the next kernel is always supported.
*/
- opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
+ reinit_flags = OPAL_REINIT_CPUS_HILE_BE;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX |
+ OPAL_REINIT_CPUS_MMU_HASH;
+ opal_reinit_cpus(reinit_flags);
}
}
#endif /* CONFIG_KEXEC_CORE */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 28b528197cf5..304cfe44df50 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -249,9 +249,6 @@ int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
-#define __put_user_unaligned __put_user
-#define __get_user_unaligned __get_user
-
unsigned long __must_check
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 49a6bd45957b..3d0b14afa232 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -246,6 +246,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
+ mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}
void machine_shutdown(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3ae756c0db3d..3d1d808ea8a9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -496,11 +496,6 @@ static void __init setup_memory_end(void)
pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
-static void __init setup_vmcoreinfo(void)
-{
- mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
-}
-
#ifdef CONFIG_CRASH_DUMP
/*
@@ -939,7 +934,6 @@ void __init setup_arch(char **cmdline_p)
#endif
setup_resources();
- setup_vmcoreinfo();
setup_lowcore();
smp_fill_possible_mask();
cpu_detect_mhz_feature();
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 590c91ae7541..1a6f9c39feef 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,39 +1,20 @@
-
-generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
-generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
-generic-y += poll.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += rwsem.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
generic-y += sizes.h
-generic-y += socket.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += trace_clock.h
-generic-y += ucontext.h
generic-y += xor.h
diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
index 1b77f068be2b..c9828f785ca0 100644
--- a/arch/sh/include/asm/bug.h
+++ b/arch/sh/include/asm/bug.h
@@ -48,6 +48,7 @@ do { \
"i" (__FILE__), \
"i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry))); \
+ unreachable(); \
} while (0)
#define __WARN_FLAGS(flags) \
diff --git a/arch/sh/include/asm/flat.h b/arch/sh/include/asm/flat.h
index 5d84df5e27f6..275fcae23539 100644
--- a/arch/sh/include/asm/flat.h
+++ b/arch/sh/include/asm/flat.h
@@ -12,11 +12,22 @@
#ifndef __ASM_SH_FLAT_H
#define __ASM_SH_FLAT_H
+#include <asm/unaligned.h>
+
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) ({ (void)p; 0; })
diff --git a/arch/sh/include/asm/stackprotector.h b/arch/sh/include/asm/stackprotector.h
index d9df3a76847c..141515a43b78 100644
--- a/arch/sh/include/asm/stackprotector.h
+++ b/arch/sh/include/asm/stackprotector.h
@@ -19,6 +19,7 @@ static __always_inline void boot_init_stack_canary(void)
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
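Masking the canary does more than shrink its range. Assuming the generic 64-bit CANARY_MASK of ~0xffUL (the exact constant lives in linux/stackprotector.h and varies by word size and endianness), the low byte becomes a NUL terminator, so a string-based overflow cannot copy through the canary in one pass. A sketch:

/* Effect of the CANARY_MASK step; the mask value is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned long canary = 0x1122334455667788UL; /* random input */
	unsigned long canary_mask = ~0xffUL;	     /* assumed CANARY_MASK */

	canary &= canary_mask;
	printf("canary=%#lx\n", canary);	/* low byte is now 0x00 */
	return 0;
}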
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index b55fc2ae1e8c..e28531333efa 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,4 +1,22 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += socket.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += ucontext.h
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index d94dadedf74f..445b5e69b73c 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -234,7 +234,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
+static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
/* Purge all ways in a particular block of sets, specified by the base
set number and number of sets. Can handle wrap-around, if that's
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e9e837bc3158..80ddc01f57ac 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -18,5 +18,4 @@ generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index 26ad2b2607c6..284eac3ffaf2 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -7,6 +7,7 @@ void nmi_adjust_hz(unsigned int new_hz);
extern atomic_t nmi_active;
+void arch_touch_nmi_watchdog(void);
void start_nmi_watchdog(void *unused);
void stop_nmi_watchdog(void *unused);
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04de3664..ff05992dae7a 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
void init_cur_cpu_trap(struct thread_info *);
void setup_tba(void);
extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
unsigned long real_hard_smp_processor_id(void);
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 113d84eaa15e..6d4c997d1a9e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -9,7 +9,6 @@
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
-#include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h>
#include <asm/processor.h>
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index b15bf6bc0e94..2178c78c7c1a 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += types.h
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index e4b4e790bf89..fa466ce45bc9 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -205,7 +205,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
- base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT);
+ base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!base)
return NULL;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 95e73c63c99d..048ad783ea3f 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -51,7 +51,7 @@ static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
-void touch_nmi_watchdog(void)
+void arch_touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
@@ -61,10 +61,8 @@ void touch_nmi_watchdog(void)
per_cpu(nmi_touch, cpu) = 1;
}
}
-
- touch_softlockup_watchdog();
}
-EXPORT_SYMBOL(touch_nmi_watchdog);
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 24f21c726dfa..f10e2f712394 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -673,12 +673,14 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask;
+ u64 dma_addr_mask = iommu->dma_addr_mask;
- if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- dma_addr_mask = iommu->dma_addr_mask;
+ if (device_mask > DMA_BIT_MASK(32)) {
+ if (iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ return 0;
+ }
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fdf31040a7dc..3218bc43302e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -622,22 +622,48 @@ retry:
}
}
-/* Multi-cpu list version. */
+#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
+#define MONDO_USEC_WAIT_MIN 2
+#define MONDO_USEC_WAIT_MAX 100
+#define MONDO_RETRY_LIMIT 500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to the 'cnt' cpus listed in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * it until all cpus have received it, or until cpus are truly stuck
+ * unable to receive the mondo and we time out.
+ * Occasionally a target cpu strand is borrowed briefly by the
+ * hypervisor to perform guest service, such as PCIe error handling.
+ * Considering the service time, an overall wait of 1 second is
+ * reasonable for 1 cpu. Two in-between mondo check wait times are
+ * defined: 2 usec for a single cpu's quick turnaround, and up to
+ * 100 usec for a large cpu count.
+ * Delivering the mondo to a large number of cpus can take longer,
+ * so we adjust the retry count as long as the target cpus are
+ * making forward progress.
+ */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
- int retries, this_cpu, prev_sent, i, saw_cpu_error;
+ int this_cpu, tot_cpus, prev_sent, i, rem;
+ int usec_wait, retries, tot_retries;
+ u16 first_cpu = 0xffff;
+ unsigned long xc_rcvd = 0;
unsigned long status;
+ int ecpuerror_id = 0;
+ int enocpu_id = 0;
u16 *cpu_list;
+ u16 cpu;
this_cpu = smp_processor_id();
-
cpu_list = __va(tb->cpu_list_pa);
-
- saw_cpu_error = 0;
- retries = 0;
+ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+ if (usec_wait > MONDO_USEC_WAIT_MAX)
+ usec_wait = MONDO_USEC_WAIT_MAX;
+ retries = tot_retries = 0;
+ tot_cpus = cnt;
prev_sent = 0;
+
do {
- int forward_progress, n_sent;
+ int n_sent, mondo_delivered, target_cpu_busy;
status = sun4v_cpu_mondo_send(cnt,
tb->cpu_list_pa,
@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
/* HV_EOK means all cpus received the xcall, we're done. */
if (likely(status == HV_EOK))
- break;
+ goto xcall_done;
+
+ /* If the status is not one of these non-fatal errors, panic */
+ if (unlikely((status != HV_EWOULDBLOCK) &&
+ (status != HV_ECPUERROR) &&
+ (status != HV_ENOCPU)))
+ goto fatal_errors;
/* First, see if we made any forward progress.
*
+ * Go through the cpu_list, count the target cpus that have
+ * received our mondo (n_sent), and those that did not (rem).
+ * Re-pack cpu_list with the cpus that remain to be retried at
+ * the front - this simplifies tracking the truly stalled cpus.
+ *
* The hypervisor indicates successful sends by setting
* cpu list entries to the value 0xffff.
+ *
+ * EWOULDBLOCK means some target cpus did not receive the
+ * mondo and retry usually helps.
+ *
+ * ECPUERROR means at least one target cpu is in error state,
+ * it's usually safe to skip the faulty cpu and retry.
+ *
+ * ENOCPU means one of the target cpus doesn't belong to the
+ * domain, perhaps because it was offlined, which is unexpected
+ * but not fatal; it's okay to skip the offlined cpu.
*/
+ rem = 0;
n_sent = 0;
for (i = 0; i < cnt; i++) {
- if (likely(cpu_list[i] == 0xffff))
+ cpu = cpu_list[i];
+ if (likely(cpu == 0xffff)) {
n_sent++;
+ } else if ((status == HV_ECPUERROR) &&
+ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+ ecpuerror_id = cpu + 1;
+ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+ enocpu_id = cpu + 1;
+ } else {
+ cpu_list[rem++] = cpu;
+ }
}
- forward_progress = 0;
- if (n_sent > prev_sent)
- forward_progress = 1;
+ /* No cpu remained, we're done. */
+ if (rem == 0)
+ break;
- prev_sent = n_sent;
+ /* Otherwise, update the cpu count for retry. */
+ cnt = rem;
- /* If we get a HV_ECPUERROR, then one or more of the cpus
- * in the list are in error state. Use the cpu_state()
- * hypervisor call to find out which cpus are in error state.
+ /* Record the overall number of mondos received by the
+ * first of the remaining cpus.
*/
- if (unlikely(status == HV_ECPUERROR)) {
- for (i = 0; i < cnt; i++) {
- long err;
- u16 cpu;
+ if (first_cpu != cpu_list[0]) {
+ first_cpu = cpu_list[0];
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ }
- cpu = cpu_list[i];
- if (cpu == 0xffff)
- continue;
+ /* Was any mondo delivered successfully? */
+ mondo_delivered = (n_sent > prev_sent);
+ prev_sent = n_sent;
- err = sun4v_cpu_state(cpu);
- if (err == HV_CPU_STATE_ERROR) {
- saw_cpu_error = (cpu + 1);
- cpu_list[i] = 0xffff;
- }
- }
- } else if (unlikely(status != HV_EWOULDBLOCK))
- goto fatal_mondo_error;
+ /* or, was any target cpu busy processing other mondos? */
+ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
- /* Don't bother rewriting the CPU list, just leave the
- * 0xffff and non-0xffff entries in there and the
- * hypervisor will do the right thing.
- *
- * Only advance timeout state if we didn't make any
- * forward progress.
+ /* Retry count is for no progress. If we're making progress,
+ * reset the retry count.
*/
- if (unlikely(!forward_progress)) {
- if (unlikely(++retries > 10000))
- goto fatal_mondo_timeout;
-
- /* Delay a little bit to let other cpus catch up
- * on their cpu mondo queue work.
- */
- udelay(2 * cnt);
+ if (likely(mondo_delivered || target_cpu_busy)) {
+ tot_retries += retries;
+ retries = 0;
+ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+ goto fatal_mondo_timeout;
}
- } while (1);
- if (unlikely(saw_cpu_error))
- goto fatal_mondo_cpu_error;
+ /* Delay a little bit to let other cpus catch up on
+ * their cpu mondo queue work.
+ */
+ if (!mondo_delivered)
+ udelay(usec_wait);
- return;
+ retries++;
+ } while (1);
-fatal_mondo_cpu_error:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
- "(including %d) were in error state\n",
- this_cpu, saw_cpu_error - 1);
+xcall_done:
+ if (unlikely(ecpuerror_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+ this_cpu, ecpuerror_id - 1);
+ } else if (unlikely(enocpu_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+ this_cpu, enocpu_id - 1);
+ }
return;
+fatal_errors:
+ /* fatal errors include bad alignment, etc */
+ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+ panic("Unexpected SUN4V mondo error %lu\n", status);
+
fatal_mondo_timeout:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
- " progress after %d retries.\n",
- this_cpu, retries);
- goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
- this_cpu, status);
- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
- "mondo_block_pa(%lx)\n",
- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
- for (i = 0; i < cnt; i++)
- printk("%u ", cpu_list[i]);
- printk("]\n");
+ /* some cpus were non-responsive to the cpu mondo */
+ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+ panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
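
The loop above distinguishes two kinds of progress - mondos actually delivered (n_sent grew) and target cpus merely busy (their mondo counter advanced) - and only spends the retry budget when neither is seen. A condensed sketch of that control flow, with the hypervisor details stripped and helpers marked hypothetical:

	/* Condensed sketch of the retry policy; send_batch() and
	 * all_delivered() are hypothetical stand-ins for the hypervisor
	 * call and the cpu_list scan.
	 */
	int retries = 0, prev_sent = 0;
	unsigned long xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

	do {
		int n_sent = send_batch();
		int delivered = (n_sent > prev_sent);
		int busy = (CPU_MONDO_COUNTER(first_cpu) > xc_rcvd);

		prev_sent = n_sent;
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		if (delivered || busy)
			retries = 0;			/* forward progress: reset budget */
		else if (retries > MONDO_RETRY_LIMIT)
			panic("mondo timeout");		/* truly stuck */

		if (!delivered)
			udelay(usec_wait);		/* let targets drain their queues */
		retries++;
	} while (!all_delivered());
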
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e9c199..34631995859a 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ /* Get smp_processor_id() into %g3 */
+ sethi %hi(trap_block), %g5
+ or %g5, %lo(trap_block), %g5
+ sub %g4, %g5, %g3
+ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+ /* Increment cpu_mondo_counter[smp_processor_id()] */
+ sethi %hi(cpu_mondo_counter), %g5
+ or %g5, %lo(cpu_mondo_counter), %g5
+ sllx %g3, 3, %g3
+ add %g5, %g3, %g5
+ ldx [%g5], %g3
+ add %g3, 1, %g3
+ stx %g3, [%g5]
+
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
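
The added assembly recovers smp_processor_id() from the trap_block offset and increments the matching cpu_mondo_counter slot; as plain C the bookkeeping would read roughly as below (illustrative only - the real code runs in the trap path with only global registers available, and this_trap_block() is an assumed helper):

	/* C rendering of the assembly above; this_trap_block() is an
	 * assumed helper returning the per-cpu trap_block entry that the
	 * assembly derives from ASI_SCRATCHPAD.
	 */
	struct trap_per_cpu *tb = this_trap_block();
	int cpu = tb - trap_block;	/* the srlx by TRAP_BLOCK_SZ_SHIFT */

	cpu_mondo_counter[cpu]++;	/* the ldx / add / stx sequence */
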
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 196ee5eb4d48..ad31af1dd726 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
}
}
+u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 16f0b08c8ce9..d28d2b8932c7 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,37 +2,18 @@ generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += parport.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += seccomp.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += xor.h
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index d0c79c1c54b4..cb4fbe7e4f88 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -19,7 +19,6 @@
* User space memory access functions
*/
#include <linux/mm.h>
-#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>
diff --git a/arch/tile/include/uapi/arch/abi.h b/arch/tile/include/uapi/arch/abi.h
index c55a3d432644..328e62260272 100644
--- a/arch/tile/include/uapi/arch/abi.h
+++ b/arch/tile/include/uapi/arch/abi.h
@@ -20,58 +20,17 @@
#ifndef __ARCH_ABI_H__
-#if !defined __need_int_reg_t && !defined __DOXYGEN__
-# define __ARCH_ABI_H__
-# include <arch/chip.h>
-#endif
-
-/* Provide the basic machine types. */
-#ifndef __INT_REG_BITS
-
-/** Number of bits in a register. */
-#if defined __tilegx__
-# define __INT_REG_BITS 64
-#elif defined __tilepro__
-# define __INT_REG_BITS 32
-#elif !defined __need_int_reg_t
+#ifndef __tile__ /* support uncommon use of arch headers in non-tile builds */
# include <arch/chip.h>
# define __INT_REG_BITS CHIP_WORD_SIZE()
-#else
-# error Unrecognized architecture with __need_int_reg_t
-#endif
-
-#if __INT_REG_BITS == 64
-
-#ifndef __ASSEMBLER__
-/** Unsigned type that can hold a register. */
-typedef unsigned long long __uint_reg_t;
-
-/** Signed type that can hold a register. */
-typedef long long __int_reg_t;
-#endif
-
-/** String prefix to use for printf(). */
-#define __INT_REG_FMT "ll"
-
-#else
-
-#ifndef __ASSEMBLER__
-/** Unsigned type that can hold a register. */
-typedef unsigned long __uint_reg_t;
-
-/** Signed type that can hold a register. */
-typedef long __int_reg_t;
-#endif
-
-/** String prefix to use for printf(). */
-#define __INT_REG_FMT "l"
-
#endif
-#endif /* __INT_REG_BITS */
+#include <arch/intreg.h>
+/* __need_int_reg_t is deprecated: just include <arch/intreg.h> */
#ifndef __need_int_reg_t
+#define __ARCH_ABI_H__
#ifndef __ASSEMBLER__
/** Unsigned type that can hold a register. */
diff --git a/arch/tile/include/uapi/arch/intreg.h b/arch/tile/include/uapi/arch/intreg.h
new file mode 100644
index 000000000000..1cf2fbf74306
--- /dev/null
+++ b/arch/tile/include/uapi/arch/intreg.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * Provide types and defines for the type that can hold a register,
+ * in the implementation namespace.
+ */
+
+#ifndef __ARCH_INTREG_H__
+#define __ARCH_INTREG_H__
+
+/*
+ * Get number of bits in a register. __INT_REG_BITS may be defined
+ * prior to including this header to force a particular bit width.
+ */
+
+#ifndef __INT_REG_BITS
+# if defined __tilegx__
+# define __INT_REG_BITS 64
+# elif defined __tilepro__
+# define __INT_REG_BITS 32
+# else
+# error Unrecognized architecture
+# endif
+#endif
+
+#if __INT_REG_BITS == 64
+
+# ifndef __ASSEMBLER__
+/** Unsigned type that can hold a register. */
+typedef unsigned long long __uint_reg_t;
+
+/** Signed type that can hold a register. */
+typedef long long __int_reg_t;
+# endif
+
+/** String prefix to use for printf(). */
+# define __INT_REG_FMT "ll"
+
+#elif __INT_REG_BITS == 32
+
+# ifndef __ASSEMBLER__
+/** Unsigned type that can hold a register. */
+typedef unsigned long __uint_reg_t;
+
+/** Signed type that can hold a register. */
+typedef long __int_reg_t;
+# endif
+
+/** String prefix to use for printf(). */
+# define __INT_REG_FMT "l"
+
+#else
+# error Unrecognized value of __INT_REG_BITS
+#endif
+
+#endif /* !__ARCH_INTREG_H__ */
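
Splitting the register-width machinery into <arch/intreg.h> makes it usable on its own; a minimal consumer, assuming a tilegx userspace target, might look like:

#include <stdio.h>
#include <arch/intreg.h>

int main(void)
{
	__uint_reg_t reg = 0x1234;

	/* __INT_REG_FMT expands to "ll" on tilegx and "l" on tilepro. */
	printf("reg = 0x%" __INT_REG_FMT "x\n", reg);
	return 0;
}
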
diff --git a/arch/tile/include/uapi/asm/Kbuild b/arch/tile/include/uapi/asm/Kbuild
index 0c74c3c5ebfa..5711de0a1b5e 100644
--- a/arch/tile/include/uapi/asm/Kbuild
+++ b/arch/tile/include/uapi/asm/Kbuild
@@ -1,4 +1,23 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
generic-y += ucontext.h
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 3a97e4d7205c..5f757e04bcd2 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -857,36 +857,6 @@ void __init mem_init(void)
#endif
}
-/*
- * this is for the non-NUMA, single node SMP system case.
- * Specifically, in the case of x86, we will always add
- * memory to the highmem for now.
- */
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-int arch_add_memory(u64 start, u64 size, bool for_device)
-{
- struct pglist_data *pgdata = &contig_page_data;
- struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long nr_pages = size >> PAGE_SHIFT;
-
- return __add_pages(zone, start_pfn, nr_pages);
-}
-
-int remove_memory(u64 start, u64 size)
-{
- return -EINVAL;
-}
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size)
-{
- /* TODO */
- return -EBUSY;
-}
-#endif
-#endif
-
struct kmem_cache *pgd_cache;
void __init pgtable_cache_init(void)
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 0ca46ededfc7..6ca4f66085c1 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -59,10 +59,14 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/um
# Same things for in6addr_loopback and mktime - found in libc. For these two we
# only get link-time error, luckily.
#
+# -Dlongjmp=kernel_longjmp prevents anything from referencing the libpthread.a
+# embedded copy of longjmp; the same applies to setjmp.
+#
# These apply to USER_CFLAGS too.
KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ \
$(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
+ -Dlongjmp=kernel_longjmp -Dsetjmp=kernel_setjmp \
-Din6addr_loopback=kernel_in6addr_loopback \
-Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr
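
The -Dsym=kernel_sym flags work purely textually: the preprocessor rewrites every use of setjmp/longjmp before the linker could ever bind them to the libpthread.a copies, and UML links in its own kernel_setjmp()/kernel_longjmp(). A schematic illustration (the prototypes and buffer below are assumptions, not UML's actual declarations):

/* example.c, built with: cc -Dsetjmp=kernel_setjmp -Dlongjmp=kernel_longjmp ... */
extern int kernel_setjmp(void *env);
extern void kernel_longjmp(void *env, int val);

static char env[512];

void example(void)
{
	if (setjmp(env) == 0)		/* preprocessed to kernel_setjmp(env) */
		longjmp(env, 1);	/* preprocessed to kernel_longjmp(env, 1) */
}
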
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 7b361f36ca96..c90817b04da9 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -192,6 +192,9 @@ __uml_exitcall(console_exit);
static int console_chan_setup(char *str)
{
+ if (!strncmp(str, "sole=", 5)) /* console= option specifies tty */
+ return 0;
+
line_setup(vt_conf, MAX_TTYS, &def_conf, str, "console");
return 1;
}
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 133055311dce..9e6d5997cfc4 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -15,7 +15,7 @@
PROVIDE (_unprotected_end = .);
. = ALIGN(4096);
- .note : { *(.note.*) }
+ NOTES
EXCEPTION_TABLE(0)
BUG_TABLE
diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h
new file mode 100644
index 000000000000..8f35d574f35b
--- /dev/null
+++ b/arch/um/include/asm/io.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_UM_IO_H
+#define _ASM_UM_IO_H
+
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ return (void __iomem *)(unsigned long)offset;
+}
+
+#define iounmap iounmap
+static inline void iounmap(void __iomem *addr)
+{
+}
+
+#include <asm-generic/io.h>
+
+#endif
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index cd1fa97776c3..574e03fc7ba2 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -242,6 +242,10 @@ extern void setup_hostinfo(char *buf, int len);
extern void os_dump_core(void) __attribute__ ((noreturn));
extern void um_early_printk(const char *s, unsigned int n);
extern void os_fix_helper_signals(void);
+extern void os_info(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+extern void os_warn(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
/* time.c */
extern void os_idle_sleep(unsigned long long nsecs);
diff --git a/arch/um/include/shared/skas/stub-data.h b/arch/um/include/shared/skas/stub-data.h
index a9deece956bf..13f404e1262b 100644
--- a/arch/um/include/shared/skas/stub-data.h
+++ b/arch/um/include/shared/skas/stub-data.h
@@ -8,8 +8,6 @@
#ifndef __STUB_DATA_H
#define __STUB_DATA_H
-#include <time.h>
-
struct stub_data {
unsigned long offset;
int fd;
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 4c9861b421fd..f02596e9931d 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -89,8 +89,8 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
offset = uml_reserved - uml_physmem;
map_size = len - offset;
if(map_size <= 0) {
- printf("Too few physical memory! Needed=%d, given=%d\n",
- offset, len);
+ os_warn("Too few physical memory! Needed=%lu, given=%lu\n",
+ offset, len);
exit(1);
}
@@ -99,9 +99,9 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
map_size, 1, 1, 1);
if (err < 0) {
- printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
- "failed - errno = %d\n", map_size,
- (void *) uml_reserved, err);
+ os_warn("setup_physmem - mapping %ld bytes of memory at 0x%p "
+ "failed - errno = %d\n", map_size,
+ (void *) uml_reserved, err);
exit(1);
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 59158871b9fc..4e6fcb32620f 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -183,6 +183,16 @@ void fatal_sigsegv(void)
os_dump_core();
}
+/**
+ * segv_handler() - the SIGSEGV handler
+ * @sig: the signal number
+ * @unused_si: the signal info struct; unused in this handler
+ * @regs: the ptrace register information
+ *
+ * The handler first extracts the faultinfo from the UML ptrace regs struct.
+ * If the fault did not happen in a UML userspace process, bad_segv() is called.
+ * Otherwise the signal happened in a cloned userspace process and is handled here.
+ */
void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 7b5640117325..f433690b9b37 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -34,7 +34,7 @@ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
static void __init add_arg(char *arg)
{
if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) {
- printf("add_arg: Too many command line arguments!\n");
+ os_warn("add_arg: Too many command line arguments!\n");
exit(1);
}
if (strlen(command_line) > 0)
@@ -120,6 +120,7 @@ static const char *usage_string =
static int __init uml_version_setup(char *line, int *add)
{
+ /* Explicitly use printf() to show version in stdout */
printf("%s\n", init_utsname()->release);
exit(0);
@@ -148,8 +149,8 @@ __uml_setup("root=", uml_root_setup,
static int __init no_skas_debug_setup(char *line, int *add)
{
- printf("'debug' is not necessary to gdb UML in skas mode - run \n");
- printf("'gdb linux'\n");
+ os_warn("'debug' is not necessary to gdb UML in skas mode - run\n");
+ os_warn("'gdb linux'\n");
return 0;
}
@@ -165,6 +166,7 @@ static int __init Usage(char *line, int *add)
printf(usage_string, init_utsname()->release);
p = &__uml_help_start;
+ /* Explicitly use printf() to show help in stdout */
while (p < &__uml_help_end) {
printf("%s", *p);
p++;
@@ -283,8 +285,8 @@ int __init linux_main(int argc, char **argv)
diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
if (diff > 1024 * 1024) {
- printf("Adding %ld bytes to physical memory to account for "
- "exec-shield gap\n", diff);
+ os_info("Adding %ld bytes to physical memory to account for "
+ "exec-shield gap\n", diff);
physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
}
@@ -324,8 +326,8 @@ int __init linux_main(int argc, char **argv)
end_vm = start_vm + virtmem_size;
if (virtmem_size < physmem_size)
- printf("Kernel virtual memory size shrunk to %lu bytes\n",
- virtmem_size);
+ os_info("Kernel virtual memory size shrunk to %lu bytes\n",
+ virtmem_size);
os_flush_stdout();
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index f6cc3bd61781..10bf4aca529f 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -16,14 +16,14 @@ static int __init set_umid_arg(char *name, int *add)
int err;
if (umid_inited) {
- printf("umid already set\n");
+ os_warn("umid already set\n");
return 0;
}
*add = 0;
err = set_umid(name);
if (err == -EEXIST)
- printf("umid '%s' already in use\n", name);
+ os_warn("umid '%s' already in use\n", name);
else if (!err)
umid_inited = 1;
diff --git a/arch/um/os-Linux/execvp.c b/arch/um/os-Linux/execvp.c
index 8fb25ca07c46..84a0777c2a45 100644
--- a/arch/um/os-Linux/execvp.c
+++ b/arch/um/os-Linux/execvp.c
@@ -136,7 +136,7 @@ int main(int argc, char**argv)
int ret;
argc--;
if (!argc) {
- fprintf(stderr, "Not enough arguments\n");
+ os_warn("Not enough arguments\n");
return 1;
}
argv++;
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 9d499de87e63..5f970ece5ac3 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -74,8 +74,8 @@ static void install_fatal_handler(int sig)
action.sa_restorer = NULL;
action.sa_handler = last_ditch_exit;
if (sigaction(sig, &action, NULL) < 0) {
- printf("failed to install handler for signal %d - errno = %d\n",
- sig, errno);
+ os_warn("failed to install handler for signal %d "
+ "- errno = %d\n", sig, errno);
exit(1);
}
}
@@ -175,7 +175,7 @@ int __init main(int argc, char **argv, char **envp)
/* disable SIGIO for the fds and set SIGIO to be ignored */
err = deactivate_all_fds();
if (err)
- printf("deactivate_all_fds failed, errno = %d\n", -err);
+ os_warn("deactivate_all_fds failed, errno = %d\n", -err);
/*
* Let any pending signals fire now. This ensures
@@ -184,14 +184,13 @@ int __init main(int argc, char **argv, char **envp)
*/
unblock_signals();
+ os_info("\n");
/* Reboot */
if (ret) {
- printf("\n");
execvp(new_argv[0], new_argv);
perror("Failed to exec kernel");
ret = 1;
}
- printf("\n");
return uml_exitcode;
}
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 8b1767668515..e162a95ad7dd 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -25,13 +25,13 @@ static int __init check_tmpfs(const char *dir)
{
struct statfs st;
- printf("Checking if %s is on tmpfs...", dir);
+ os_info("Checking if %s is on tmpfs...", dir);
if (statfs(dir, &st) < 0) {
- printf("%s\n", strerror(errno));
+ os_info("%s\n", strerror(errno));
} else if (st.f_type != TMPFS_MAGIC) {
- printf("no\n");
+ os_info("no\n");
} else {
- printf("OK\n");
+ os_info("OK\n");
return 0;
}
return -1;
@@ -61,18 +61,18 @@ static char * __init choose_tempdir(void)
int i;
const char *dir;
- printf("Checking environment variables for a tempdir...");
+ os_info("Checking environment variables for a tempdir...");
for (i = 0; vars[i]; i++) {
dir = getenv(vars[i]);
if ((dir != NULL) && (*dir != '\0')) {
- printf("%s\n", dir);
+ os_info("%s\n", dir);
if (check_tmpfs(dir) >= 0)
goto done;
else
goto warn;
}
}
- printf("none found\n");
+ os_info("none found\n");
for (i = 0; tmpfs_dirs[i]; i++) {
dir = tmpfs_dirs[i];
@@ -82,7 +82,7 @@ static char * __init choose_tempdir(void)
dir = fallback_dir;
warn:
- printf("Warning: tempdir %s is not on tmpfs\n", dir);
+ os_warn("Warning: tempdir %s is not on tmpfs\n", dir);
done:
/* Make a copy since getenv results may not remain valid forever. */
return strdup(dir);
@@ -100,7 +100,7 @@ static int __init make_tempfile(const char *template)
if (tempdir == NULL) {
tempdir = choose_tempdir();
if (tempdir == NULL) {
- fprintf(stderr, "Failed to choose tempdir: %s\n",
+ os_warn("Failed to choose tempdir: %s\n",
strerror(errno));
return -1;
}
@@ -125,7 +125,7 @@ static int __init make_tempfile(const char *template)
strcat(tempname, template);
fd = mkstemp(tempname);
if (fd < 0) {
- fprintf(stderr, "open - cannot create %s: %s\n", tempname,
+ os_warn("open - cannot create %s: %s\n", tempname,
strerror(errno));
goto out;
}
@@ -194,16 +194,16 @@ void __init check_tmpexec(void)
addr = mmap(NULL, UM_KERN_PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, 0);
- printf("Checking PROT_EXEC mmap in %s...", tempdir);
+ os_info("Checking PROT_EXEC mmap in %s...", tempdir);
if (addr == MAP_FAILED) {
err = errno;
- printf("%s\n", strerror(err));
+ os_warn("%s\n", strerror(err));
close(fd);
if (err == EPERM)
- printf("%s must be not mounted noexec\n", tempdir);
+ os_warn("%s must be not mounted noexec\n", tempdir);
exit(1);
}
- printf("OK\n");
+ os_info("OK\n");
munmap(addr, UM_KERN_PAGE_SIZE);
close(fd);
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 03b3c4cc7735..819d68656673 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -108,7 +108,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
wait_stub_done(pid);
/*
- * faultinfo is prepared by the stub-segv-handler at start of
+ * faultinfo is prepared by the stub_segv_handler at start of
* the stub stack page. We just have to copy it.
*/
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
@@ -175,6 +175,21 @@ static void handle_trap(int pid, struct uml_pt_regs *regs,
extern char __syscall_stub_start[];
+/**
+ * userspace_tramp() - userspace trampoline
+ * @stack: pointer to the new userspace stack page, can be NULL, if? FIXME:
+ *
+ * The userspace trampoline is used to set up a new userspace process in start_userspace() after it was clone()'d.
+ * This function will run on a temporary stack page.
+ * It ptrace()s itself, then
+ * two pages are mapped into the userspace address space:
+ * - STUB_CODE (with EXEC), which contains the skas stub code
+ * - STUB_DATA (with R/W), which contains a data page that is used to transfer certain data between the UML userspace process and the UML kernel.
+ * A SIGSEGV handler is also installed for the userspace process to catch pagefaults in it.
+ * Lastly the process stops itself to give control to the UML kernel for this userspace process.
+ *
+ * Return: Always zero; otherwise the current userspace process is ended with a non-zero exit() call
+ */
static int userspace_tramp(void *stack)
{
void *addr;
@@ -236,12 +251,24 @@ static int userspace_tramp(void *stack)
int userspace_pid[NR_CPUS];
+/**
+ * start_userspace() - prepare a new userspace process
+ * @stub_stack: pointer to the stub stack. Can be NULL, if? FIXME:
+ *
+ * Sets up a new temporary stack page that is used while userspace_tramp() runs.
+ * Clones the kernel process into a new userspace process, sharing FDs only.
+ *
+ * Return: When positive: the process id of the new userspace process,
+ * when negative: an error number.
+ * FIXME: can PIDs become negative?!
+ */
int start_userspace(unsigned long stub_stack)
{
void *stack;
unsigned long sp;
int pid, status, n, flags, err;
+ /* setup a temporary stack page */
stack = mmap(NULL, UM_KERN_PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -252,10 +279,12 @@ int start_userspace(unsigned long stub_stack)
return err;
}
+ /* set stack pointer to the end of the stack page, so it can grow downwards */
sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
flags = CLONE_FILES | SIGCHLD;
+ /* clone into new userspace process */
pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
if (pid < 0) {
err = -errno;
@@ -323,11 +352,17 @@ void userspace(struct uml_pt_regs *regs)
* fail. In this case, there is nothing to do but
* just kill the process.
*/
- if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
+ if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
+ printk(UM_KERN_ERR "userspace - ptrace set regs "
+ "failed, errno = %d\n", errno);
fatal_sigsegv();
+ }
- if (put_fp_registers(pid, regs->fp))
+ if (put_fp_registers(pid, regs->fp)) {
+ printk(UM_KERN_ERR "userspace - ptrace set fp regs "
+ "failed, errno = %d\n", errno);
fatal_sigsegv();
+ }
/* Now we set local_using_sysemu to be used for one loop */
local_using_sysemu = get_using_sysemu();
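
Stripped of the UML specifics, the kernel-doc above describes the standard clone()-on-a-private-stack pattern. A minimal, self-contained userspace sketch of that pattern (not UML code):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>

static int tramp(void *arg)
{
	/* Runs on the caller-supplied stack; a real trampoline would set
	 * up tracing and mappings here before handing control back.
	 */
	printf("child running, arg=%p\n", arg);
	return 0;
}

int main(void)
{
	const long size = 4096;
	void *stack = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED)
		return 1;
	/* The stack grows downwards: point just below the mapping's top. */
	void *sp = (char *)stack + size - sizeof(void *);
	int pid = clone(tramp, sp, CLONE_FILES | SIGCHLD, NULL);

	waitpid(pid, NULL, 0);
	return 0;
}
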
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 22a358ef1b0c..b1b6b75c5b17 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -166,7 +166,7 @@ static void __init check_sysemu(void)
unsigned long regs[MAX_REG_NR];
int pid, n, status, count=0;
- non_fatal("Checking syscall emulation patch for ptrace...");
+ os_info("Checking syscall emulation patch for ptrace...");
sysemu_supported = 0;
pid = start_ptraced_child();
@@ -199,10 +199,10 @@ static void __init check_sysemu(void)
goto fail_stopped;
sysemu_supported = 1;
- non_fatal("OK\n");
+ os_info("OK\n");
set_using_sysemu(!force_sysemu_disabled);
- non_fatal("Checking advanced syscall emulation patch for ptrace...");
+ os_info("Checking advanced syscall emulation patch for ptrace...");
pid = start_ptraced_child();
if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
@@ -244,7 +244,7 @@ static void __init check_sysemu(void)
goto fail_stopped;
sysemu_supported = 2;
- non_fatal("OK\n");
+ os_info("OK\n");
if (!force_sysemu_disabled)
set_using_sysemu(sysemu_supported);
@@ -260,7 +260,7 @@ static void __init check_ptrace(void)
{
int pid, syscall, n, status;
- non_fatal("Checking that ptrace can change system call numbers...");
+ os_info("Checking that ptrace can change system call numbers...");
pid = start_ptraced_child();
if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
@@ -292,7 +292,7 @@ static void __init check_ptrace(void)
}
}
stop_ptraced_child(pid, 0, 1);
- non_fatal("OK\n");
+ os_info("OK\n");
check_sysemu();
}
@@ -308,15 +308,17 @@ static void __init check_coredump_limit(void)
return;
}
- printf("Core dump limits :\n\tsoft - ");
+ os_info("Core dump limits :\n\tsoft - ");
if (lim.rlim_cur == RLIM_INFINITY)
- printf("NONE\n");
- else printf("%lu\n", lim.rlim_cur);
+ os_info("NONE\n");
+ else
+ os_info("%llu\n", (unsigned long long)lim.rlim_cur);
- printf("\thard - ");
+ os_info("\thard - ");
if (lim.rlim_max == RLIM_INFINITY)
- printf("NONE\n");
- else printf("%lu\n", lim.rlim_max);
+ os_info("NONE\n");
+ else
+ os_info("%llu\n", (unsigned long long)lim.rlim_max);
}
void __init os_early_checks(void)
@@ -349,7 +351,7 @@ int __init parse_iomem(char *str, int *add)
driver = str;
file = strchr(str,',');
if (file == NULL) {
- fprintf(stderr, "parse_iomem : failed to parse iomem\n");
+ os_warn("parse_iomem : failed to parse iomem\n");
goto out;
}
*file = '\0';
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index c1dc89261f67..998fbb445458 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -35,8 +35,9 @@ static int __init make_uml_dir(void)
err = -ENOENT;
if (home == NULL) {
- printk(UM_KERN_ERR "make_uml_dir : no value in "
- "environment for $HOME\n");
+ printk(UM_KERN_ERR
+ "%s: no value in environment for $HOME\n",
+ __func__);
goto err;
}
strlcpy(dir, home, sizeof(dir));
@@ -50,13 +51,15 @@ static int __init make_uml_dir(void)
err = -ENOMEM;
uml_dir = malloc(strlen(dir) + 1);
if (uml_dir == NULL) {
- printf("make_uml_dir : malloc failed, errno = %d\n", errno);
+ printk(UM_KERN_ERR "%s : malloc failed, errno = %d\n",
+ __func__, errno);
goto err;
}
strcpy(uml_dir, dir);
if ((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)) {
- printf("Failed to mkdir '%s': %s\n", uml_dir, strerror(errno));
+ printk(UM_KERN_ERR "Failed to mkdir '%s': %s\n",
+ uml_dir, strerror(errno));
err = -errno;
goto err_free;
}
@@ -351,7 +354,7 @@ char *get_umid(void)
static int __init set_uml_dir(char *name, int *add)
{
if (*name == '\0') {
- printf("uml_dir can't be an empty string\n");
+ os_warn("uml_dir can't be an empty string\n");
return 0;
}
@@ -362,7 +365,7 @@ static int __init set_uml_dir(char *name, int *add)
uml_dir = malloc(strlen(name) + 2);
if (uml_dir == NULL) {
- printf("Failed to malloc uml_dir - error = %d\n", errno);
+ os_warn("Failed to malloc uml_dir - error = %d\n", errno);
/*
* Return 0 here because do_initcalls doesn't look at
@@ -387,8 +390,8 @@ static void remove_umid_dir(void)
sprintf(dir, "%s%s", uml_dir, umid);
err = remove_files_and_dir(dir);
if (err)
- printf("remove_umid_dir - remove_files_and_dir failed with "
- "err = %d\n", err);
+ os_warn("%s - remove_files_and_dir failed with err = %d\n",
+ __func__, err);
}
__uml_exitcall(remove_umid_dir);
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index faee55ef6d2f..8cc8b2617a67 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -13,6 +13,7 @@
#include <wait.h>
#include <sys/mman.h>
#include <sys/utsname.h>
+#include <init.h>
#include <os.h>
void stack_protections(unsigned long address)
@@ -152,3 +153,36 @@ void um_early_printk(const char *s, unsigned int n)
{
printf("%.*s", n, s);
}
+
+static int quiet_info;
+
+static int __init quiet_cmd_param(char *str, int *add)
+{
+ quiet_info = 1;
+ return 0;
+}
+
+__uml_setup("quiet", quiet_cmd_param,
+"quiet\n"
+" Turns off information messages during boot.\n\n");
+
+void os_info(const char *fmt, ...)
+{
+ va_list list;
+
+ if (quiet_info)
+ return;
+
+ va_start(list, fmt);
+ vfprintf(stderr, fmt, list);
+ va_end(list);
+}
+
+void os_warn(const char *fmt, ...)
+{
+ va_list list;
+
+ va_start(list, fmt);
+ vfprintf(stderr, fmt, list);
+ va_end(list);
+}
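
Taken together, the two helpers establish a convention for the UML boot path: os_info() for progress chatter (now suppressible with the quiet option), os_warn() for messages that must always appear. A hypothetical usage sketch:

	/* Hypothetical boot-path snippet following the new convention. */
	os_info("Checking feature X...");	/* silenced by "quiet" */
	if (err) {
		os_warn("feature X unavailable, errno = %d\n", -err);
		return err;
	}
	os_info("OK\n");
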
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 7a53a55341de..fda7e2153086 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -1,66 +1,38 @@
-
generic-y += atomic.h
-generic-y += auxvec.h
-generic-y += bitsperlong.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kprobes.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += module.h
-generic-y += msgbuf.h
-generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += sections.h
generic-y += segment.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += setup.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += signal.h
generic-y += sizes.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
generic-y += syscalls.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 1c44d3b3eba0..759a71411169 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,5 +1,32 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 94a18681353d..781521b7cf9e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -50,6 +50,7 @@ config X86
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH
@@ -162,6 +163,7 @@ config X86
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 00241c815524..a0838ab929f2 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -411,3 +411,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
+
+void fortify_panic(const char *name)
+{
+ error("detected buffer overflow");
+}
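
fortify_panic() is the hook that FORTIFY_SOURCE-instrumented string/memory functions call on a detected overflow; the decompressor must provide its own since it cannot use the kernel's normal panic path. A simplified sketch of the caller side (the real fortified wrappers live in include/linux/string.h and also diagnose constant-size overflows at compile time):

__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);	/* known dest size */
	size_t q_size = __builtin_object_size(q, 0);	/* known src size */

	if (p_size < size || q_size < size)
		fortify_panic(__func__);	/* overflow: never returns */
	return __builtin_memcpy(p, q, size);
}
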
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..f960a043cdeb 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
- if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2701e5f8145b..ca3c48c0872f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -286,6 +286,7 @@
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index d2ff779f347e..796ff6c1aa53 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -33,7 +33,7 @@
#ifdef CONFIG_X86_32
-extern unsigned long asmlinkage efi_call_phys(void *, ...);
+extern asmlinkage unsigned long efi_call_phys(void *, ...);
#define arch_efi_call_virt_setup() kernel_fpu_begin()
#define arch_efi_call_virt_teardown() kernel_fpu_end()
@@ -52,7 +52,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define EFI_LOADER_SIGNATURE "EL64"
-extern u64 asmlinkage efi_call(void *fp, ...);
+extern asmlinkage u64 efi_call(void *fp, ...);
#define efi_call_phys(f, args...) efi_call((f), args)
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 34b984c60790..6cf65437b5e5 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -52,10 +52,10 @@ typedef u8 kprobe_opcode_t;
#define flush_insn_slot(p) do { } while (0)
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE \
(((unsigned long)&optprobe_template_end - \
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 722d0e568863..fde36f189836 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -23,6 +23,7 @@ struct x86_exception {
u16 error_code;
bool nested_page_fault;
u64 address; /* cr2 or nested page fault gpa */
+ u8 async_page_fault;
};
/*
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1588e9e3dc01..87ac4fba6d8e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -462,10 +462,12 @@ struct kvm_vcpu_hv_synic {
DECLARE_BITMAP(auto_eoi_bitmap, 256);
DECLARE_BITMAP(vec_bitmap, 256);
bool active;
+ bool dont_zero_synic_pages;
};
/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
+ u32 vp_index;
u64 hv_vapic;
s64 runtime_offset;
struct kvm_vcpu_hv_synic synic;
@@ -549,6 +551,7 @@ struct kvm_vcpu_arch {
bool reinject;
u8 nr;
u32 error_code;
+ u8 nested_apf;
} exception;
struct kvm_queued_interrupt {
@@ -649,6 +652,9 @@ struct kvm_vcpu_arch {
u64 msr_val;
u32 id;
bool send_user_only;
+ u32 host_apf_reason;
+ unsigned long nested_apf_token;
+ bool delivery_as_pf_vmexit;
} apf;
/* OSVW MSRs (AMD only) */
@@ -803,6 +809,7 @@ struct kvm_arch {
int audit_point;
#endif
+ bool backwards_tsc_observed;
bool boot_vcpu_runs_old_kvmclock;
u32 bsp_vcpu_id;
@@ -952,9 +959,7 @@ struct kvm_x86_ops {
unsigned char *hypercall_addr);
void (*set_irq)(struct kvm_vcpu *vcpu);
void (*set_nmi)(struct kvm_vcpu *vcpu);
- void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code,
- bool reinject);
+ void (*queue_exception)(struct kvm_vcpu *vcpu);
void (*cancel_injection)(struct kvm_vcpu *vcpu);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*nmi_allowed)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index cb976bab6299..9ffc36bfe4cd 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
*/
unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
unsigned long addr, unsigned len);
-};
+} __no_randomize_layout;
struct pv_lazy_ops {
@@ -92,12 +92,12 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
-};
+} __no_randomize_layout;
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
-};
+} __no_randomize_layout;
struct pv_cpu_ops {
/* hooks for various privileged instructions */
@@ -176,7 +176,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
-};
+} __no_randomize_layout;
struct pv_irq_ops {
/*
@@ -199,7 +199,7 @@ struct pv_irq_ops {
#ifdef CONFIG_X86_64
void (*adjust_exception_frame)(void);
#endif
-};
+} __no_randomize_layout;
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
@@ -305,7 +305,7 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
-};
+} __no_randomize_layout;
struct arch_spinlock;
#ifdef CONFIG_SMP
@@ -322,7 +322,7 @@ struct pv_lock_ops {
void (*kick)(int cpu);
struct paravirt_callee_save vcpu_is_preempted;
-};
+} __no_randomize_layout;
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
@@ -334,7 +334,7 @@ struct paravirt_patch_template {
struct pv_irq_ops pv_irq_ops;
struct pv_mmu_ops pv_mmu_ops;
struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
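
The annotation interacts with the GCC structure-layout randomization plugin: structs consisting purely of function pointers are randomized by default, and the pv_*_ops tables must opt out because code patching and offset arithmetic depend on their fixed layout. A sketch of the attribute pair in use:

/* Sketch: a function-pointer table opts out because its offsets are ABI. */
struct frob_ops {
	int  (*probe)(void);
	void (*run)(int arg);
} __no_randomize_layout;

/* Whereas a sensitive data struct can opt in explicitly: */
struct frob_ctx {
	int id;
	long cookie;
} __randomize_layout;
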
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a79547e8ee0..028245e1c42b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -129,7 +129,7 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
-};
+} __randomize_layout;
struct cpuid_regs {
u32 eax, ebx, ecx, edx;
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index dcbd9bcce714..8abedf1d650e 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -74,6 +74,7 @@ static __always_inline void boot_init_stack_canary(void)
get_random_bytes(&canary, sizeof(canary));
tsc = rdtsc();
canary += tsc + (tsc << 32UL);
+ canary &= CANARY_MASK;
current->stack_canary = canary;
#ifdef CONFIG_X86_64
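
Masking the canary implements the "ascii armor" trick: clearing the low byte plants a NUL terminator inside the canary, so string-based overflows stop before they can read or faithfully replay it. Illustratively (the exact CANARY_MASK value here is an assumption for 64-bit):

	u64 canary = 0x1122334455667788ULL;	/* raw random value */

	canary &= 0xffffffffffffff00ULL;	/* assumed 64-bit CANARY_MASK */
	/* canary == 0x1122334455667700: the embedded NUL byte terminates
	 * str*()-style copies, so such overflows can neither leak nor
	 * faithfully reproduce the canary.
	 */
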
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 3d3e8353ee5c..e9ee84873de5 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -142,7 +142,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
}
#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
@@ -195,11 +197,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
#endif
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);
+extern int memcmp(const void *, const void *, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#define memcmp __builtin_memcmp
+#endif
#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);
@@ -321,6 +327,8 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
: __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
@@ -330,6 +338,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
(count)) \
: __memset((s), (c), (count)))
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
/*
* find the first occurrence of byte 'c', or 1 past the area if none
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 1f22bc277c45..2a8c822de1fc 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -31,6 +31,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
+#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
@@ -51,6 +52,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
*/
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
@@ -77,6 +79,11 @@ int strcmp(const char *cs, const char *ct);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
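
The __NO_FORTIFY define closes a loop: when memcpy & co. are forced back to their raw __ prefixed versions (the case this #ifdef block guards), the fortified wrappers would still expand to __builtin_memcpy and bypass that redirection, so such files opt out of fortification entirely. Roughly, the string header keys on it like this (simplified sketch):

/* Simplified sketch of the gate in <linux/string.h>. */
#if defined(CONFIG_FORTIFY_SOURCE) && !defined(__NO_FORTIFY)
/* fortified inline wrappers for memcpy()/memset()/... are emitted */
#else
/* plain extern prototypes only; raw or instrumented versions apply */
#endif
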
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 14824fc78f7e..58fffe79e417 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -83,7 +83,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 event_inj;
u32 event_inj_err;
u64 nested_cr3;
- u64 lbr_ctl;
+ u64 virt_ext;
u32 clean;
u32 reserved_5;
u64 next_rip;
@@ -119,6 +119,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
+#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
+
#define SVM_INTERRUPT_SHADOW_MASK 1
#define SVM_IOIO_STR_SHIFT 2
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 476ea27f490b..30269dafec47 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -535,9 +535,6 @@ struct __large_struct { unsigned long buf[100]; };
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
/*
* {get|put}_user_try and catch
*
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index cff0bb6556f8..a965e5b0d328 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -67,6 +67,7 @@ struct kvm_clock_pairing {
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
+#define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c73c9fb281e1..d6f387780849 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -19,7 +19,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
return (u64)(cpu_khz) * 1000 * watchdog_thresh;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 22217ece26c8..44404e2307bb 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -457,7 +457,7 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_NOTE;
phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
- phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
+ phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
(ehdr->e_phnum)++;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 43e10d6fdbed..71c17a5be983 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -330,7 +330,12 @@ static void kvm_guest_cpu_init(void)
#ifdef CONFIG_PREEMPT
pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
- wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
+ pa |= KVM_ASYNC_PF_ENABLED;
+
+ /* Async page fault support for L1 hypervisor is optional */
+ if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
+ (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
+ wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
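
The wrmsr_safe()/wrmsrl() pair above is the usual probe-then-fallback idiom for optional MSR feature bits: try the write with the new bit set, and if the host #GPs the faulting write, repeat it without the bit. The same idiom in isolation (MSR_SOME_REG and OPTIONAL_FEATURE_BIT are placeholders):

	u64 val = base | OPTIONAL_FEATURE_BIT;

	if (wrmsr_safe(MSR_SOME_REG, (u32)val, (u32)(val >> 32)) < 0)
		wrmsrl(MSR_SOME_REG, base);	/* old host: retry without the bit */
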
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index ebae57ac5902..2695a34fa1c5 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -106,14 +106,27 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
return 0;
}
-static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
+static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ if (vpidx < KVM_MAX_VCPUS)
+ vcpu = kvm_get_vcpu(kvm, vpidx);
+ if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
+ return vcpu;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
+ return vcpu;
+ return NULL;
+}
+
+static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
struct kvm_vcpu *vcpu;
struct kvm_vcpu_hv_synic *synic;
- if (vcpu_id >= atomic_read(&kvm->online_vcpus))
- return NULL;
- vcpu = kvm_get_vcpu(kvm, vcpu_id);
+ vcpu = get_vcpu_by_vpidx(kvm, vpidx);
if (!vcpu)
return NULL;
synic = vcpu_to_synic(vcpu);
@@ -221,7 +234,8 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
synic->version = data;
break;
case HV_X64_MSR_SIEFP:
- if (data & HV_SYNIC_SIEFP_ENABLE)
+ if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
+ !synic->dont_zero_synic_pages)
if (kvm_clear_guest(vcpu->kvm,
data & PAGE_MASK, PAGE_SIZE)) {
ret = 1;
@@ -232,7 +246,8 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
synic_exit(synic, msr);
break;
case HV_X64_MSR_SIMP:
- if (data & HV_SYNIC_SIMP_ENABLE)
+ if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
+ !synic->dont_zero_synic_pages)
if (kvm_clear_guest(vcpu->kvm,
data & PAGE_MASK, PAGE_SIZE)) {
ret = 1;
@@ -318,11 +333,11 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
return ret;
}
-int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
+int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
struct kvm_vcpu_hv_synic *synic;
- synic = synic_get(kvm, vcpu_id);
+ synic = synic_get(kvm, vpidx);
if (!synic)
return -EINVAL;
@@ -341,11 +356,11 @@ void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
kvm_hv_notify_acked_sint(vcpu, i);
}
-static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
+static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
struct kvm_vcpu_hv_synic *synic;
- synic = synic_get(kvm, vcpu_id);
+ synic = synic_get(kvm, vpidx);
if (!synic)
return -EINVAL;
@@ -687,14 +702,24 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
stimer_init(&hv_vcpu->stimer[i], i);
}
-int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
+void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+
+ hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+}
+
+int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
+ struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+
/*
* Hyper-V SynIC auto EOI SINT's are
* not compatible with APICV, so deactivate APICV
*/
kvm_vcpu_deactivate_apicv(vcpu);
- vcpu_to_synic(vcpu)->active = true;
+ synic->active = true;
+ synic->dont_zero_synic_pages = dont_zero_synic_pages;
return 0;
}
@@ -978,6 +1003,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
switch (msr) {
+ case HV_X64_MSR_VP_INDEX:
+ if (!host)
+ return 1;
+ hv->vp_index = (u32)data;
+ break;
case HV_X64_MSR_APIC_ASSIST_PAGE: {
u64 gfn;
unsigned long addr;
@@ -1089,18 +1119,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
switch (msr) {
- case HV_X64_MSR_VP_INDEX: {
- int r;
- struct kvm_vcpu *v;
-
- kvm_for_each_vcpu(r, v, vcpu->kvm) {
- if (v == vcpu) {
- data = r;
- break;
- }
- }
+ case HV_X64_MSR_VP_INDEX:
+ data = hv->vp_index;
break;
- }
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
case HV_X64_MSR_ICR:
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index cd1119538add..e637631a9574 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -56,9 +56,10 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
-int kvm_hv_activate_synic(struct kvm_vcpu *vcpu);
+int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index a78b445ce411..af192895b1fc 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -724,8 +724,10 @@ void kvm_free_pit(struct kvm *kvm)
struct kvm_pit *pit = kvm->arch.vpit;
if (pit) {
+ mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
+ mutex_unlock(&kvm->slots_lock);
kvm_pit_set_reinject(pit, false);
hrtimer_cancel(&pit->pit_state.timer);
kthread_destroy_worker(pit->worker);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aafd399cf8c6..9b1dd114956a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -46,6 +46,7 @@
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
+#include "trace.h"
/*
* When setting this variable to true it enables Two-Dimensional-Paging
@@ -3748,7 +3749,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
kvm_event_needs_reinjection(vcpu)))
return false;
- if (is_guest_mode(vcpu))
+ if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
return false;
return kvm_x86_ops->interrupt_allowed(vcpu);
@@ -3780,6 +3781,38 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
return false;
}
+int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+ u64 fault_address, char *insn, int insn_len,
+ bool need_unprotect)
+{
+ int r = 1;
+
+ switch (vcpu->arch.apf.host_apf_reason) {
+ default:
+ trace_kvm_page_fault(fault_address, error_code);
+
+ if (need_unprotect && kvm_event_needs_reinjection(vcpu))
+ kvm_mmu_unprotect_page_virt(vcpu, fault_address);
+ r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
+ insn_len);
+ break;
+ case KVM_PV_REASON_PAGE_NOT_PRESENT:
+ vcpu->arch.apf.host_apf_reason = 0;
+ local_irq_disable();
+ kvm_async_pf_task_wait(fault_address);
+ local_irq_enable();
+ break;
+ case KVM_PV_REASON_PAGE_READY:
+ vcpu->arch.apf.host_apf_reason = 0;
+ local_irq_disable();
+ kvm_async_pf_task_wake(fault_address);
+ local_irq_enable();
+ break;
+ }
+ return r;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
+
static bool
check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
{
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index a276834950c1..d7d248a000dd 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -77,6 +77,9 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
+int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+ u64 fault_address, char *insn, int insn_len,
+ bool need_unprotect);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 905ea6052517..4d8141e533c3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -194,7 +194,6 @@ struct vcpu_svm {
unsigned int3_injected;
unsigned long int3_rip;
- u32 apf_reason;
/* cached guest cpuid flags for faster access */
bool nrips_enabled : 1;
@@ -277,6 +276,10 @@ static int avic;
module_param(avic, int, S_IRUGO);
#endif
+/* enable/disable Virtual VMLOAD VMSAVE */
+static int vls = true;
+module_param(vls, int, 0444);
+
/* AVIC VM ID bit masks and lock */
static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
static DEFINE_SPINLOCK(avic_vm_id_lock);
@@ -633,11 +636,13 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm_set_interrupt_shadow(vcpu, 0);
}
-static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code,
- bool reinject)
+static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned nr = vcpu->arch.exception.nr;
+ bool has_error_code = vcpu->arch.exception.has_error_code;
+ bool reinject = vcpu->arch.exception.reinject;
+ u32 error_code = vcpu->arch.exception.error_code;
/*
* If we are within a nested VM we'd better #VMEXIT and let the guest
@@ -947,7 +952,7 @@ static void svm_enable_lbrv(struct vcpu_svm *svm)
{
u32 *msrpm = svm->msrpm;
- svm->vmcb->control.lbr_ctl = 1;
+ svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
@@ -958,7 +963,7 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
{
u32 *msrpm = svm->msrpm;
- svm->vmcb->control.lbr_ctl = 0;
+ svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
@@ -1093,6 +1098,16 @@ static __init int svm_hardware_setup(void)
}
}
+ if (vls) {
+ if (!npt_enabled ||
+ !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
+ !IS_ENABLED(CONFIG_X86_64)) {
+ vls = false;
+ } else {
+ pr_info("Virtual VMLOAD VMSAVE supported\n");
+ }
+ }
+
return 0;
err:
@@ -1280,6 +1295,16 @@ static void init_vmcb(struct vcpu_svm *svm)
if (avic)
avic_init_vmcb(svm);
+ /*
+ * If hardware supports Virtual VMLOAD VMSAVE then enable it
+ * in VMCB and clear intercepts to avoid #VMEXIT.
+ */
+ if (vls) {
+ clr_intercept(svm, INTERCEPT_VMLOAD);
+ clr_intercept(svm, INTERCEPT_VMSAVE);
+ svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+ }
+
mark_all_dirty(svm->vmcb);
enable_gif(svm);
@@ -2096,34 +2121,11 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
static int pf_interception(struct vcpu_svm *svm)
{
u64 fault_address = svm->vmcb->control.exit_info_2;
- u64 error_code;
- int r = 1;
+ u64 error_code = svm->vmcb->control.exit_info_1;
- switch (svm->apf_reason) {
- default:
- error_code = svm->vmcb->control.exit_info_1;
-
- trace_kvm_page_fault(fault_address, error_code);
- if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
- kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
- r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+ return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
svm->vmcb->control.insn_bytes,
- svm->vmcb->control.insn_len);
- break;
- case KVM_PV_REASON_PAGE_NOT_PRESENT:
- svm->apf_reason = 0;
- local_irq_disable();
- kvm_async_pf_task_wait(fault_address);
- local_irq_enable();
- break;
- case KVM_PV_REASON_PAGE_READY:
- svm->apf_reason = 0;
- local_irq_disable();
- kvm_async_pf_task_wake(fault_address);
- local_irq_enable();
- break;
- }
- return r;
+ svm->vmcb->control.insn_len, !npt_enabled);
}
static int db_interception(struct vcpu_svm *svm)
@@ -2267,7 +2269,7 @@ static int io_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
- int size, in, string;
+ int size, in, string, ret;
unsigned port;
++svm->vcpu.stat.io_exits;
@@ -2279,10 +2281,16 @@ static int io_interception(struct vcpu_svm *svm)
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
svm->next_rip = svm->vmcb->control.exit_info_2;
- skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(&svm->vcpu);
- return in ? kvm_fast_pio_in(vcpu, size, port)
- : kvm_fast_pio_out(vcpu, size, port);
+ /*
+ * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ if (in)
+ return kvm_fast_pio_in(vcpu, size, port) && ret;
+ else
+ return kvm_fast_pio_out(vcpu, size, port) && ret;
}
static int nmi_interception(struct vcpu_svm *svm)
@@ -2415,15 +2423,19 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
if (!is_guest_mode(&svm->vcpu))
return 0;
+ vmexit = nested_svm_intercept(svm);
+ if (vmexit != NESTED_EXIT_DONE)
+ return 0;
+
svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = error_code;
- svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
-
- vmexit = nested_svm_intercept(svm);
- if (vmexit == NESTED_EXIT_DONE)
- svm->nested.exit_required = true;
+ if (svm->vcpu.arch.exception.nested_apf)
+ svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
+ else
+ svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
+ svm->nested.exit_required = true;
return vmexit;
}
@@ -2598,7 +2610,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
break;
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
/* When we're shadowing, trap PFs, but not async PF */
- if (!npt_enabled && svm->apf_reason == 0)
+ if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
return NESTED_EXIT_HOST;
break;
default:
@@ -2645,7 +2657,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
}
/* async page fault always cause vmexit */
else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
- svm->apf_reason != 0)
+ svm->vcpu.arch.exception.nested_apf != 0)
vmexit = NESTED_EXIT_DONE;
break;
}
@@ -2702,7 +2714,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
dst->event_inj = from->event_inj;
dst->event_inj_err = from->event_inj_err;
dst->nested_cr3 = from->nested_cr3;
- dst->lbr_ctl = from->lbr_ctl;
+ dst->virt_ext = from->virt_ext;
}
static int nested_svm_vmexit(struct vcpu_svm *svm)
@@ -3008,7 +3020,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
/* We don't want to see VMMCALLs from a nested guest */
clr_intercept(svm, INTERCEPT_VMMCALL);
- svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
+ svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
@@ -3055,6 +3067,7 @@ static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct page *page;
+ int ret;
if (nested_svm_check_permissions(svm))
return 1;
@@ -3064,18 +3077,19 @@ static int vmload_interception(struct vcpu_svm *svm)
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
nested_svm_unmap(page);
- return 1;
+ return ret;
}
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct page *page;
+ int ret;
if (nested_svm_check_permissions(svm))
return 1;
@@ -3085,12 +3099,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
nested_svm_unmap(page);
- return 1;
+ return ret;
}
static int vmrun_interception(struct vcpu_svm *svm)
@@ -3123,25 +3137,29 @@ failed:
static int stgi_interception(struct vcpu_svm *svm)
{
+ int ret;
+
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(&svm->vcpu);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
enable_gif(svm);
- return 1;
+ return ret;
}
static int clgi_interception(struct vcpu_svm *svm)
{
+ int ret;
+
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(&svm->vcpu);
disable_gif(svm);
@@ -3152,7 +3170,7 @@ static int clgi_interception(struct vcpu_svm *svm)
mark_dirty(svm->vmcb, VMCB_INTR);
}
- return 1;
+ return ret;
}
static int invlpga_interception(struct vcpu_svm *svm)
@@ -3166,8 +3184,7 @@ static int invlpga_interception(struct vcpu_svm *svm)
kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(&svm->vcpu);
}
static int skinit_interception(struct vcpu_svm *svm)
@@ -3190,7 +3207,7 @@ static int xsetbv_interception(struct vcpu_svm *svm)
if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
+ return kvm_skip_emulated_instruction(&svm->vcpu);
}
return 1;
@@ -3286,8 +3303,7 @@ static int invlpg_interception(struct vcpu_svm *svm)
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
- skip_emulated_instruction(&svm->vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(&svm->vcpu);
}
static int emulate_on_interception(struct vcpu_svm *svm)
@@ -3437,9 +3453,7 @@ static int dr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, reg, val);
}
- skip_emulated_instruction(&svm->vcpu);
-
- return 1;
+ return kvm_skip_emulated_instruction(&svm->vcpu);
}
static int cr8_write_interception(struct vcpu_svm *svm)
@@ -3562,6 +3576,7 @@ static int rdmsr_interception(struct vcpu_svm *svm)
if (svm_get_msr(&svm->vcpu, &msr_info)) {
trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(&svm->vcpu, 0);
+ return 1;
} else {
trace_kvm_msr_read(ecx, msr_info.data);
@@ -3570,9 +3585,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
msr_info.data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
- skip_emulated_instruction(&svm->vcpu);
+ return kvm_skip_emulated_instruction(&svm->vcpu);
}
- return 1;
}
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
@@ -3698,11 +3712,11 @@ static int wrmsr_interception(struct vcpu_svm *svm)
if (kvm_set_msr(&svm->vcpu, &msr)) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(&svm->vcpu, 0);
+ return 1;
} else {
trace_kvm_msr_write(ecx, data);
- skip_emulated_instruction(&svm->vcpu);
+ return kvm_skip_emulated_instruction(&svm->vcpu);
}
- return 1;
}
static int msr_interception(struct vcpu_svm *svm)
@@ -3731,8 +3745,7 @@ static int pause_interception(struct vcpu_svm *svm)
static int nop_interception(struct vcpu_svm *svm)
{
- skip_emulated_instruction(&(svm->vcpu));
- return 1;
+ return kvm_skip_emulated_instruction(&(svm->vcpu));
}
static int monitor_interception(struct vcpu_svm *svm)
@@ -4117,7 +4130,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
- pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
+ pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
@@ -4965,7 +4978,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
- svm->apf_reason = kvm_read_and_reset_pf_reason();
+ svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
if (npt_enabled) {
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
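
A recurring pattern in the svm.c hunks above is worth calling out: skip_emulated_instruction() is replaced by kvm_skip_emulated_instruction(), and its return value is propagated instead of a hard-coded 1, so a KVM_GUESTDBG_SINGLESTEP-triggered KVM_EXIT_DEBUG is not silently swallowed. A hedged sketch of the converted handler shape:

/* Sketch of the conversion applied to the intercept handlers above. */
static int example_interception(struct vcpu_svm *svm)
{
        int ret;

        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        ret = kvm_skip_emulated_instruction(&svm->vcpu);

        /* ... emulate the intercepted instruction's side effects ... */

        return ret;     /* 0 requests a userspace exit (e.g. single-step) */
}
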
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f76efad248ab..84e62acf2dd8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2422,28 +2422,41 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
* KVM wants to inject page-faults which it got to the guest. This function
* checks whether in a nested guest, we need to inject them to L1 or L2.
*/
-static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned int nr = vcpu->arch.exception.nr;
- if (!(vmcs12->exception_bitmap & (1u << nr)))
+ if (!((vmcs12->exception_bitmap & (1u << nr)) ||
+ (nr == PF_VECTOR && vcpu->arch.exception.nested_apf)))
return 0;
+ if (vcpu->arch.exception.nested_apf) {
+ vmcs_write32(VM_EXIT_INTR_ERROR_CODE, vcpu->arch.exception.error_code);
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
+ INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
+ vcpu->arch.apf.nested_apf_token);
+ return 1;
+ }
+
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
-static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code,
- bool reinject)
+static void vmx_queue_exception(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned nr = vcpu->arch.exception.nr;
+ bool has_error_code = vcpu->arch.exception.has_error_code;
+ bool reinject = vcpu->arch.exception.reinject;
+ u32 error_code = vcpu->arch.exception.error_code;
u32 intr_info = nr | INTR_INFO_VALID_MASK;
if (!reinject && is_guest_mode(vcpu) &&
- nested_vmx_check_exception(vcpu, nr))
+ nested_vmx_check_exception(vcpu))
return;
if (has_error_code) {
@@ -3764,6 +3777,25 @@ static void free_kvm_area(void)
}
}
+enum vmcs_field_type {
+ VMCS_FIELD_TYPE_U16 = 0,
+ VMCS_FIELD_TYPE_U64 = 1,
+ VMCS_FIELD_TYPE_U32 = 2,
+ VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
+};
+
+static inline int vmcs_field_type(unsigned long field)
+{
+ if (0x1 & field) /* the *_HIGH fields are all 32 bit */
+ return VMCS_FIELD_TYPE_U32;
+ return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+ return (((field >> 10) & 0x3) == 1);
+}
+
static void init_vmcs_shadow_fields(void)
{
int i, j;
@@ -3789,14 +3821,22 @@ static void init_vmcs_shadow_fields(void)
/* shadowed fields guest access without vmexit */
for (i = 0; i < max_shadow_read_write_fields; i++) {
- clear_bit(shadow_read_write_fields[i],
- vmx_vmwrite_bitmap);
- clear_bit(shadow_read_write_fields[i],
- vmx_vmread_bitmap);
+ unsigned long field = shadow_read_write_fields[i];
+
+ clear_bit(field, vmx_vmwrite_bitmap);
+ clear_bit(field, vmx_vmread_bitmap);
+ if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) {
+ clear_bit(field + 1, vmx_vmwrite_bitmap);
+ clear_bit(field + 1, vmx_vmread_bitmap);
+ }
+ }
+ for (i = 0; i < max_shadow_read_only_fields; i++) {
+ unsigned long field = shadow_read_only_fields[i];
+
+ clear_bit(field, vmx_vmread_bitmap);
+ if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64)
+ clear_bit(field + 1, vmx_vmread_bitmap);
}
- for (i = 0; i < max_shadow_read_only_fields; i++)
- clear_bit(shadow_read_only_fields[i],
- vmx_vmread_bitmap);
}
static __init int alloc_kvm_area(void)
@@ -4634,6 +4674,11 @@ static bool guest_state_valid(struct kvm_vcpu *vcpu)
return true;
}
+static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
+}
+
static int init_rmode_tss(struct kvm *kvm)
{
gfn_t fn;
@@ -5664,14 +5709,11 @@ static int handle_exception(struct kvm_vcpu *vcpu)
}
if (is_page_fault(intr_info)) {
- /* EPT won't cause page fault directly */
- BUG_ON(enable_ept);
cr2 = vmcs_readl(EXIT_QUALIFICATION);
- trace_kvm_page_fault(cr2, error_code);
-
- if (kvm_event_needs_reinjection(vcpu))
- kvm_mmu_unprotect_page_virt(vcpu, cr2);
- return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
+ /* EPT won't cause page fault directly */
+ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+ return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
+ true);
}
ex_no = intr_info & INTR_INFO_VECTOR_MASK;
@@ -7214,25 +7256,6 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
return nested_vmx_run(vcpu, false);
}
-enum vmcs_field_type {
- VMCS_FIELD_TYPE_U16 = 0,
- VMCS_FIELD_TYPE_U64 = 1,
- VMCS_FIELD_TYPE_U32 = 2,
- VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
-};
-
-static inline int vmcs_field_type(unsigned long field)
-{
- if (0x1 & field) /* the *_HIGH fields are all 32 bit */
- return VMCS_FIELD_TYPE_U32;
- return (field >> 13) & 0x3 ;
-}
-
-static inline int vmcs_field_readonly(unsigned long field)
-{
- return (((field >> 10) & 0x3) == 1);
-}
-
/*
* Read a vmcs12 field. Since these can have varying lengths and we return
* one type, we chose the biggest type (u64) and zero-extend the return value
@@ -8014,7 +8037,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
if (is_nmi(intr_info))
return false;
else if (is_page_fault(intr_info))
- return enable_ept;
+ return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
return false;
@@ -8418,9 +8441,15 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
- vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vectoring_info;
vcpu->run->internal.data[1] = exit_reason;
+ vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+ if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+ vcpu->run->internal.ndata++;
+ vcpu->run->internal.data[3] =
+ vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ }
return 0;
}
@@ -8611,17 +8640,24 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
{
- u32 exit_intr_info;
+ u32 exit_intr_info = 0;
+ u16 basic_exit_reason = (u16)vmx->exit_reason;
- if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
- || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
+ if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
+ || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
return;
- vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- exit_intr_info = vmx->exit_intr_info;
+ if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ vmx->exit_intr_info = exit_intr_info;
+
+ /* if exit due to PF check for async PF */
+ if (is_page_fault(exit_intr_info))
+ vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
/* Handle machine checks before interrupts are enabled */
- if (is_machine_check(exit_intr_info))
+ if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
+ is_machine_check(exit_intr_info))
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
@@ -9589,23 +9625,26 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
+static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
+ !page_address_valid(vcpu, vmcs12->io_bitmap_b))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- int maxphyaddr;
- u64 addr;
-
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return 0;
- if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
- WARN_ON(1);
- return -EINVAL;
- }
- maxphyaddr = cpuid_maxphyaddr(vcpu);
-
- if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
- ((addr + PAGE_SIZE) >> maxphyaddr))
+ if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
return -EINVAL;
return 0;
@@ -10293,6 +10332,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
@@ -10429,8 +10471,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
return 1;
}
- vmcs12->launch_state = 1;
-
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
@@ -10804,6 +10844,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+ vmcs12->launch_state = 1;
+
/* vm_entry_intr_info_field is cleared on exit. Emulate this
* instead of reading the real value. */
vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
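
The vmcs_field_type() helper, moved earlier in vmx.c so init_vmcs_shadow_fields() can use it, decodes the VMCS field encoding: bit 0 selects the 32-bit *_HIGH half of a 64-bit field, and bits 14:13 give the access width. A small userspace sketch (example encodings taken from the Intel SDM):

#include <stdio.h>

static int vmcs_field_type(unsigned long field)
{
        if (0x1 & field)                /* the *_HIGH fields are all 32 bit */
                return 2;               /* VMCS_FIELD_TYPE_U32 */
        return (field >> 13) & 0x3;
}

int main(void)
{
        printf("%d\n", vmcs_field_type(0x2800)); /* VMCS_LINK_POINTER -> 1 (u64) */
        printf("%d\n", vmcs_field_type(0x2801)); /* its _HIGH half    -> 2 (u32) */
        printf("%d\n", vmcs_field_type(0x4402)); /* VM_EXIT_REASON    -> 2 (u32) */
        return 0;
}
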
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c7266f7766d..5b8f07889f6a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -134,8 +134,6 @@ module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);
-static bool __read_mostly backwards_tsc_observed = false;
-
#define KVM_NR_SHARED_MSRS 16
struct kvm_shared_msrs_global {
@@ -452,7 +450,12 @@ EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
++vcpu->stat.pf_guest;
- vcpu->arch.cr2 = fault->address;
+ vcpu->arch.exception.nested_apf =
+ is_guest_mode(vcpu) && fault->async_page_fault;
+ if (vcpu->arch.exception.nested_apf)
+ vcpu->arch.apf.nested_apf_token = fault->address;
+ else
+ vcpu->arch.cr2 = fault->address;
kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
@@ -1719,7 +1722,7 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
&ka->master_cycle_now);
ka->use_master_clock = host_tsc_clocksource && vcpus_matched
- && !backwards_tsc_observed
+ && !ka->backwards_tsc_observed
&& !ka->boot_vcpu_runs_old_kvmclock;
if (ka->use_master_clock)
@@ -2060,8 +2063,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
- /* Bits 2:5 are reserved, Should be zero */
- if (data & 0x3c)
+ /* Bits 3:5 are reserved, Should be zero */
+ if (data & 0x38)
return 1;
vcpu->arch.apf.msr_val = data;
@@ -2077,6 +2080,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+ vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
@@ -2661,6 +2665,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_HYPERV_SYNIC:
+ case KVM_CAP_HYPERV_SYNIC2:
+ case KVM_CAP_HYPERV_VP_INDEX:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -3384,10 +3390,14 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return -EINVAL;
switch (cap->cap) {
+ case KVM_CAP_HYPERV_SYNIC2:
+ if (cap->args[0])
+ return -EINVAL;
case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
- return kvm_hv_activate_synic(vcpu);
+ return kvm_hv_activate_synic(vcpu, cap->cap ==
+ KVM_CAP_HYPERV_SYNIC2);
default:
return -EINVAL;
}
@@ -4188,9 +4198,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
r = 0;
+ /*
+ * TODO: userspace has to take care of races with VCPU_RUN, so
+ * kvm_gen_update_masterclock() can be cut down to locked
+ * pvclock_update_vm_gtod_copy().
+ */
+ kvm_gen_update_masterclock(kvm);
now_ns = get_kvmclock_ns(kvm);
kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
- kvm_gen_update_masterclock(kvm);
+ kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
break;
}
case KVM_GET_CLOCK: {
@@ -6347,10 +6363,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
kvm_update_dr7(vcpu);
}
- kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
- vcpu->arch.exception.has_error_code,
- vcpu->arch.exception.error_code,
- vcpu->arch.exception.reinject);
+ kvm_x86_ops->queue_exception(vcpu);
return 0;
}
@@ -7676,6 +7689,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
struct msr_data msr;
struct kvm *kvm = vcpu->kvm;
+ kvm_hv_vcpu_postcreate(vcpu);
+
if (vcpu_load(vcpu))
return;
msr.data = 0x0;
@@ -7829,8 +7844,8 @@ int kvm_arch_hardware_enable(void)
*/
if (backwards_tsc) {
u64 delta_cyc = max_tsc - local_tsc;
- backwards_tsc_observed = true;
list_for_each_entry(kvm, &vm_list, vm_list) {
+ kvm->arch.backwards_tsc_observed = true;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu->arch.last_host_tsc = local_tsc;
@@ -8576,6 +8591,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
+ fault.async_page_fault = true;
kvm_inject_page_fault(vcpu, &fault);
}
}
@@ -8598,6 +8614,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
+ fault.async_page_fault = true;
kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index cad12634d6bd..2eab7d0bfedd 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -6,7 +6,7 @@
__visible void *memcpy(void *to, const void *from, size_t n)
{
-#ifdef CONFIG_X86_USE_3DNOW
+#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE)
return __memcpy3d(to, from, n);
#else
return __memcpy(to, from, n);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 797295e792b2..229d04a83f85 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -92,13 +92,18 @@ unsigned long arch_mmap_rnd(void)
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size)
{
unsigned long gap = rlimit(RLIMIT_STACK);
+ unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
unsigned long gap_min, gap_max;
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+ gap += pad;
+
/*
* Top of mmap area (just below the process stack).
* Leave at least a ~128 MB hole with possible stack randomization.
*/
- gap_min = SIZE_128M + stack_maxrandom_size(task_size);
+ gap_min = SIZE_128M;
gap_max = (task_size / 6) * 5;
if (gap < gap_min)
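
The mmap_base() hunk above folds the randomization pad into the stack gap with an overflow guard: an RLIMIT_STACK close to RLIM_INFINITY would wrap the addition, so the pad is only applied when the sum still compares greater. The idiom in isolation (a sketch):

/* Sketch: add 'pad' to 'gap' only if the unsigned addition did not wrap. */
static unsigned long gap_plus_pad(unsigned long gap, unsigned long pad)
{
        if (gap + pad > gap)    /* false if pad is 0 or the sum wrapped */
                gap += pad;
        return gap;
}
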
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
index 00f54a91bb4b..28775f55bde2 100644
--- a/arch/x86/um/os-Linux/registers.c
+++ b/arch/x86/um/os-Linux/registers.c
@@ -26,6 +26,7 @@ int save_i387_registers(int pid, unsigned long *fp_regs)
int save_fp_registers(int pid, unsigned long *fp_regs)
{
+#ifdef PTRACE_GETREGSET
struct iovec iov;
if (have_xstate_support) {
@@ -34,9 +35,9 @@ int save_fp_registers(int pid, unsigned long *fp_regs)
if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
return -errno;
return 0;
- } else {
+ } else
+#endif
return save_i387_registers(pid, fp_regs);
- }
}
int restore_i387_registers(int pid, unsigned long *fp_regs)
@@ -48,6 +49,7 @@ int restore_i387_registers(int pid, unsigned long *fp_regs)
int restore_fp_registers(int pid, unsigned long *fp_regs)
{
+#ifdef PTRACE_SETREGSET
struct iovec iov;
if (have_xstate_support) {
@@ -56,9 +58,9 @@ int restore_fp_registers(int pid, unsigned long *fp_regs)
if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
return -errno;
return 0;
- } else {
+ } else
+#endif
return restore_i387_registers(pid, fp_regs);
- }
}
#ifdef __i386__
@@ -122,6 +124,7 @@ int put_fp_registers(int pid, unsigned long *regs)
void arch_init_registers(int pid)
{
+#ifdef PTRACE_GETREGSET
struct _xstate fp_regs;
struct iovec iov;
@@ -129,6 +132,7 @@ void arch_init_registers(int pid)
iov.iov_len = sizeof(struct _xstate);
if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0)
have_xstate_support = 1;
+#endif
}
#endif
diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S
index b766792c9933..39053192918d 100644
--- a/arch/x86/um/setjmp_32.S
+++ b/arch/x86/um/setjmp_32.S
@@ -16,9 +16,9 @@
.text
.align 4
- .globl setjmp
- .type setjmp, @function
-setjmp:
+ .globl kernel_setjmp
+ .type kernel_setjmp, @function
+kernel_setjmp:
#ifdef _REGPARM
movl %eax,%edx
#else
@@ -35,13 +35,13 @@ setjmp:
movl %ecx,20(%edx) # Return address
ret
- .size setjmp,.-setjmp
+ .size kernel_setjmp,.-kernel_setjmp
.text
.align 4
- .globl longjmp
- .type longjmp, @function
-longjmp:
+ .globl kernel_longjmp
+ .type kernel_longjmp, @function
+kernel_longjmp:
#ifdef _REGPARM
xchgl %eax,%edx
#else
@@ -55,4 +55,4 @@ longjmp:
movl 16(%edx),%edi
jmp *20(%edx)
- .size longjmp,.-longjmp
+ .size kernel_longjmp,.-kernel_longjmp
diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S
index 45f547b4043e..c56942e1a38c 100644
--- a/arch/x86/um/setjmp_64.S
+++ b/arch/x86/um/setjmp_64.S
@@ -18,9 +18,9 @@
.text
.align 4
- .globl setjmp
- .type setjmp, @function
-setjmp:
+ .globl kernel_setjmp
+ .type kernel_setjmp, @function
+kernel_setjmp:
pop %rsi # Return address, and adjust the stack
xorl %eax,%eax # Return value
movq %rbx,(%rdi)
@@ -34,13 +34,13 @@ setjmp:
movq %rsi,56(%rdi) # Return address
ret
- .size setjmp,.-setjmp
+ .size kernel_setjmp,.-kernel_setjmp
.text
.align 4
- .globl longjmp
- .type longjmp, @function
-longjmp:
+ .globl kernel_longjmp
+ .type kernel_longjmp, @function
+kernel_longjmp:
movl %esi,%eax # Return value (int)
movq (%rdi),%rbx
movq 8(%rdi),%rsp
@@ -51,4 +51,4 @@ longjmp:
movq 48(%rdi),%r15
jmp *56(%rdi)
- .size longjmp,.-longjmp
+ .size kernel_longjmp,.-kernel_longjmp
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index cb3c22370cf5..ae4cd58c0c7a 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -5,7 +5,7 @@
#include <sys/mman.h>
#include <sys/user.h>
#define __FRAME_OFFSETS
-#include <asm/ptrace.h>
+#include <linux/ptrace.h>
#include <asm/types.h>
#ifdef __i386__
@@ -50,7 +50,11 @@ void foo(void)
DEFINE(HOST_GS, GS);
DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
+#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
+#else
+ DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
+#endif
DEFINE_LONGS(HOST_BX, RBX);
DEFINE_LONGS(HOST_CX, RCX);
DEFINE_LONGS(HOST_DI, RDI);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1d7a7213a310..cab28cf2cffb 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2693,8 +2693,8 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())
- return virt_to_machine(&vmcoreinfo_note).maddr;
+ return virt_to_machine(vmcoreinfo_note).maddr;
else
- return __pa_symbol(&vmcoreinfo_note);
+ return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 30f6290109d4..2d716ebc5a5e 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1,20 +1,17 @@
-generic-y += bitsperlong.h
generic-y += bug.h
generic-y += clkdev.h
generic-y += div64.h
generic-y += dma-contiguous.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
-generic-y += fcntl.h
+generic-y += fb.h
generic-y += hardirq.h
-generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += kvm_para.h
+generic-y += kprobes.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
@@ -22,13 +19,9 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += rwsem.h
generic-y += sections.h
-generic-y += statfs.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += kprobes.h
diff --git a/arch/xtensa/include/asm/fb.h b/arch/xtensa/include/asm/fb.h
deleted file mode 100644
index c7df38030992..000000000000
--- a/arch/xtensa/include/asm/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h
index 94c44abf15e4..60e0d6a45795 100644
--- a/arch/xtensa/include/asm/flat.h
+++ b/arch/xtensa/include/asm/flat.h
@@ -1,11 +1,22 @@
#ifndef __ASM_XTENSA_FLAT_H
#define __ASM_XTENSA_FLAT_H
+#include <asm/unaligned.h>
+
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval ) put_unaligned(val, rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 4cb0d2f8868c..a5bcdfb890f1 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,3 +1,12 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += kvm_para.h
+generic-y += resource.h
generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += termios.h
diff --git a/certs/Makefile b/certs/Makefile
index 4119bb376ea1..847361ce14d1 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -26,7 +26,7 @@ quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2))
targets += x509_certificate_list
$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
$(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
-endif
+endif # CONFIG_SYSTEM_TRUSTED_KEYRING
clean-files := x509_certificate_list .x509.list
@@ -87,7 +87,7 @@ $(obj)/x509.genkey:
@echo >>$@ "keyUsage=digitalSignature"
@echo >>$@ "subjectKeyIdentifier=hash"
@echo >>$@ "authorityKeyIdentifier=keyid"
-endif
+endif # CONFIG_MODULE_SIG_KEY
$(eval $(call config_filename,MODULE_SIG_KEY))
@@ -102,4 +102,4 @@ $(obj)/system_certificates.o: $(obj)/signing_key.x509
targets += signing_key.x509
$(obj)/signing_key.x509: scripts/extract-cert $(X509_DEP) FORCE
$(call if_changed,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY))
-endif
+endif # CONFIG_MODULE_SIG
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 3556d8eb54a7..92a3d540d920 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -287,7 +287,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
goto unlock;
sock_init_data(newsock, sk2);
- sock_graft(sk2, newsock);
+ security_sock_graft(sk2, newsock);
security_sk_clone(sk, sk2);
err = type->accept(ask->private, sk2);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 854d428e2a2d..ddb01e9fa5b2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -147,7 +147,7 @@ static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm");
-static bool ec_freeze_events __read_mostly = true;
+static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
@@ -1870,24 +1870,6 @@ error:
}
#ifdef CONFIG_PM_SLEEP
-static int acpi_ec_suspend_noirq(struct device *dev)
-{
- struct acpi_ec *ec =
- acpi_driver_data(to_acpi_device(dev));
-
- acpi_ec_enter_noirq(ec);
- return 0;
-}
-
-static int acpi_ec_resume_noirq(struct device *dev)
-{
- struct acpi_ec *ec =
- acpi_driver_data(to_acpi_device(dev));
-
- acpi_ec_leave_noirq(ec);
- return 0;
-}
-
static int acpi_ec_suspend(struct device *dev)
{
struct acpi_ec *ec =
@@ -1909,7 +1891,6 @@ static int acpi_ec_resume(struct device *dev)
#endif
static const struct dev_pm_ops acpi_ec_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 830299a74b84..7c352cba0528 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -24,7 +24,7 @@ static struct fwnode_handle *acpi_gsi_domain_id;
*
* irq location updated with irq value [>0 on success, 0 on failure]
*
- * Returns: linux IRQ number on success (>0)
+ * Returns: 0 on success
* -EINVAL on failure
*/
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
@@ -37,7 +37,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
* *irq == 0 means no mapping, that should
* be reported as a failure
*/
- return (*irq > 0) ? *irq : -EINVAL;
+ return (*irq > 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
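
With the acpi_gsi_to_irq() fix above, the function now honors its kerneldoc: it returns 0 on success and stores the Linux IRQ number through the pointer. A sketch of a caller written against the corrected convention (claim_gsi() is a hypothetical helper):

/* Sketch: consuming acpi_gsi_to_irq() under the 0-on-success contract. */
static int claim_gsi(u32 gsi, unsigned int *out_irq)
{
        if (acpi_gsi_to_irq(gsi, out_irq))
                return -EINVAL;         /* no mapping exists for this GSI */
        return 0;                       /* *out_irq > 0, usable with request_irq() */
}
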
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b75b734ee73a..19182d091587 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
static __init int nfit_init(void)
{
+ int ret;
+
BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
return -ENOMEM;
nfit_mce_register();
+ ret = acpi_bus_register_driver(&acpi_nfit_driver);
+ if (ret) {
+ nfit_mce_unregister();
+ destroy_workqueue(nfit_wq);
+ }
+
+ return ret;
- return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index bd86b809c848..b4fbb9929482 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -12,6 +12,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../internal.h"
@@ -20,6 +21,10 @@
* Some ACPI devices are hidden (status == 0x0) in recent BIOS-es because
* some recent Windows drivers bind to one device but poke at multiple
* devices at the same time, so the others get hidden.
+ *
+ * Some BIOS-es (temporarily) hide specific ACPI devices to work around Windows
+ * driver bugs. We use DMI matching to match known cases of this.
+ *
* We work around this by always reporting ACPI_STA_DEFAULT for these
* devices. Note this MUST only be done for devices where this is safe.
*
@@ -31,14 +36,16 @@
struct always_present_id {
struct acpi_device_id hid[2];
struct x86_cpu_id cpu_ids[2];
+ struct dmi_system_id dmi_ids[2]; /* Optional */
const char *uid;
};
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-#define ENTRY(hid, uid, cpu_models) { \
+#define ENTRY(hid, uid, cpu_models, dmi...) { \
{ { hid, }, {} }, \
{ cpu_models, {} }, \
+ { { .matches = dmi }, {} }, \
uid, \
}
@@ -47,13 +54,35 @@ static const struct always_present_id always_present_ids[] = {
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1)),
- ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)),
+ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}),
+ ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
/*
* The INT0002 device is necessary to clear wakeup interrupt sources
* on Cherry Trail devices, without it we get nobody cared IRQ msgs.
*/
- ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)),
+ ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
+ /*
+ * On the Dell Venue 11 Pro 7130 the DSDT hides the touchscreen ACPI
+ * device until a certain time after _SB.PCI0.GFX0.LCD.LCD1._ON gets
+ * called has passed *and* _STA has been called at least 3 times since.
+ */
+ ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
+ }),
+ /*
+ * The GPD win BIOS dated 20170320 has the accelerometer disabled because
+ * its drivers sometimes cause crashes under Windows, and this is how the
+ * manufacturer has solved it :| Note that the DMI data is less generic
+ * than it seems: a board_vendor of "AMI Corporation" is quite rare, and
+ * a board_name of "Default string" is also rare.
+ */
+ ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
+ }),
};
bool acpi_device_always_present(struct acpi_device *adev)
@@ -76,6 +105,10 @@ bool acpi_device_always_present(struct acpi_device *adev)
if (!x86_match_cpu(always_present_ids[i].cpu_ids))
continue;
+ if (always_present_ids[i].dmi_ids[0].matches[0].slot &&
+ !dmi_check_system(always_present_ids[i].dmi_ids))
+ continue;
+
if (old_status != ACPI_STA_DEFAULT) /* Log only once */
dev_info(&adev->dev,
"Device [%s] is in always present list\n",
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d3aa7482d1f7..1ef67db03c8e 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret < 0)
- goto out_disable;
+ goto out_release;
zatm_dev->pci_dev = pci_dev;
dev->dev_data = zatm_dev;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3b8210ebb50e..60303aa28587 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1222,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
spin_unlock_irq(&dev->power.lock);
- dev_pm_domain_set(dev, &genpd->domain);
-
return gpd_data;
err_free:
@@ -1237,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
static void genpd_free_dev_data(struct device *dev,
struct generic_pm_domain_data *gpd_data)
{
- dev_pm_domain_set(dev, NULL);
-
spin_lock_irq(&dev->power.lock);
dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (ret)
goto out;
+ dev_pm_domain_set(dev, &genpd->domain);
+
genpd->device_count++;
genpd->max_off_time_changed = true;
@@ -1336,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
+ dev_pm_domain_set(dev, NULL);
+
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 185a52581cfa..156ab57bca77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -272,6 +272,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 692007e5a94b..edf02c1b5845 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -253,10 +253,10 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
{
bool ret;
- ret = fwnode_call_int_op(fwnode, property_present, propname);
+ ret = fwnode_call_bool_op(fwnode, property_present, propname);
if (ret == false && !IS_ERR_OR_NULL(fwnode) &&
!IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_int_op(fwnode->secondary, property_present,
+ ret = fwnode_call_bool_op(fwnode->secondary, property_present,
propname);
return ret;
}
@@ -1027,7 +1027,7 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
*/
bool fwnode_device_is_available(struct fwnode_handle *fwnode)
{
- return fwnode_call_int_op(fwnode, device_is_available);
+ return fwnode_call_bool_op(fwnode, device_is_available);
}
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
/*
* Register map access API - W1 (1-Wire) support
*
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
* Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
*
* This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
#include <linux/regmap.h>
#include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
#include "internal.h"
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 01a260f67437..afa3ce7d3e72 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -288,7 +288,6 @@
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
-#define DEBUG_RANDOM_BOOT 0
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
@@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
/**********************************************************************
*
@@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
_extract_entropy(&input_pool, &crng->state[4],
sizeof(__u32) * 12, 0);
else
- get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
@@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
}
-static inline void crng_wait_ready(void)
-{
- wait_event_interruptible(crng_init_wait, crng_ready());
-}
-
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA20_BLOCK_SIZE])
{
@@ -987,6 +982,11 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
+ if (!crng_ready()) {
+ crng_fast_load(buf, size);
+ return;
+ }
+
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&input_pool, buf, size);
@@ -1472,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
return ret;
}
+#define warn_unseeded_randomness(previous) \
+ _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ const bool print_once = false;
+#else
+ static bool print_once __read_mostly;
+#endif
+
+ if (print_once ||
+ crng_ready() ||
+ (previous && (caller == READ_ONCE(*previous))))
+ return;
+ WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ print_once = true;
+#endif
+ pr_notice("random: %s called from %pF with crng_init=%d\n",
+ func_name, caller, crng_init);
+}
+
/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
* TCP sequence numbers, etc. It does not rely on the hardware random
* number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch().
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
{
__u8 tmp[CHACHA20_BLOCK_SIZE];
-#if DEBUG_RANDOM_BOOT > 0
- if (!crng_ready())
- printk(KERN_NOTICE "random: %pF get_random_bytes called "
- "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1504,9 +1526,35 @@ void get_random_bytes(void *buf, int nbytes)
crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
memzero_explicit(tmp, sizeof(tmp));
}
+
+void get_random_bytes(void *buf, int nbytes)
+{
+ static void *previous;
+
+ warn_unseeded_randomness(&previous);
+ _get_random_bytes(buf, nbytes);
+}
EXPORT_SYMBOL(get_random_bytes);
/*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ if (likely(crng_ready()))
+ return 0;
+ return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
+
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
@@ -1860,6 +1908,8 @@ const struct file_operations urandom_fops = {
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
unsigned int, flags)
{
+ int ret;
+
if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
return -EINVAL;
@@ -1872,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
if (!crng_ready()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
- crng_wait_ready();
- if (signal_pending(current))
- return -ERESTARTSYS;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
}
return urandom_read(NULL, buf, count, NULL);
}
@@ -2035,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
/*
* Get a random word for internal kernel use only. The quality of the random
* number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
- bool use_lock = READ_ONCE(crng_init) < 2;
+ bool use_lock;
unsigned long flags = 0;
struct batched_entropy *batch;
+ static void *previous;
#if BITS_PER_LONG == 64
if (arch_get_random_long((unsigned long *)&ret))
@@ -2054,6 +2108,9 @@ u64 get_random_u64(void)
return ret;
#endif
+ warn_unseeded_randomness(&previous);
+
+ use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u64);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2073,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
u32 ret;
- bool use_lock = READ_ONCE(crng_init) < 2;
+ bool use_lock;
unsigned long flags = 0;
struct batched_entropy *batch;
+ static void *previous;
if (arch_get_random_int(&ret))
return ret;
+ warn_unseeded_randomness(&previous);
+
+ use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u32);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
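
The comments added to random.c above spell out the new contract: callers that need cryptographically secure output should call wait_for_random_bytes() and see it return 0 before using get_random_bytes() or the get_random_u{32,64}() family. A minimal sketch of a compliant in-kernel consumer (make_session_key() is hypothetical):

/* Sketch: blocking until the CRNG is seeded before drawing key material. */
static int make_session_key(u8 *key, size_t len)
{
        int ret = wait_for_random_bytes();      /* 0, or -ERESTARTSYS on signal */

        if (ret)
                return ret;
        get_random_bytes(key, len);             /* seeded, per the contract above */
        return 0;
}
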
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index d406b087553f..68ca2d9fcd73 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -221,6 +221,7 @@ config COMMON_CLK_VC5
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
+source "drivers/clk/imgtec/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 4f6a812342ed..cd376b3fb47a 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -60,6 +60,7 @@ obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
+obj-y += imgtec/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
diff --git a/drivers/clk/imgtec/Kconfig b/drivers/clk/imgtec/Kconfig
new file mode 100644
index 000000000000..f6dcb748e9c4
--- /dev/null
+++ b/drivers/clk/imgtec/Kconfig
@@ -0,0 +1,9 @@
+config COMMON_CLK_BOSTON
+ bool "Clock driver for MIPS Boston boards"
+ depends on MIPS || COMPILE_TEST
+ select MFD_SYSCON
+ ---help---
+ Enable this to support the system & CPU clocks on the MIPS Boston
+ development board from Imagination Technologies. These are simple
+ fixed rate clocks whose rate is determined by reading a platform
+ provided register.
diff --git a/drivers/clk/imgtec/Makefile b/drivers/clk/imgtec/Makefile
new file mode 100644
index 000000000000..ac779b8c22f2
--- /dev/null
+++ b/drivers/clk/imgtec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_COMMON_CLK_BOSTON) += clk-boston.o
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
new file mode 100644
index 000000000000..f18f10351785
--- /dev/null
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016-2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "clk-boston: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include <dt-bindings/clock/boston-clock.h>
+
+#define BOSTON_PLAT_MMCMDIV 0x30
+# define BOSTON_PLAT_MMCMDIV_CLK0DIV (0xff << 0)
+# define BOSTON_PLAT_MMCMDIV_INPUT (0xff << 8)
+# define BOSTON_PLAT_MMCMDIV_MUL (0xff << 16)
+# define BOSTON_PLAT_MMCMDIV_CLK1DIV (0xff << 24)
+
+#define BOSTON_CLK_COUNT 3
+
+static u32 ext_field(u32 val, u32 mask)
+{
+ return (val & mask) >> (ffs(mask) - 1);
+}
+
+static void __init clk_boston_setup(struct device_node *np)
+{
+ unsigned long in_freq, cpu_freq, sys_freq;
+ uint mmcmdiv, mul, cpu_div, sys_div;
+ struct clk_hw_onecell_data *onecell;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int err;
+
+ regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(regmap)) {
+ pr_err("failed to find regmap\n");
+ return;
+ }
+
+ err = regmap_read(regmap, BOSTON_PLAT_MMCMDIV, &mmcmdiv);
+ if (err) {
+ pr_err("failed to read mmcm_div register: %d\n", err);
+ return;
+ }
+
+ in_freq = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_INPUT) * 1000000;
+ mul = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_MUL);
+
+ sys_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK0DIV);
+ sys_freq = mult_frac(in_freq, mul, sys_div);
+
+ cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV);
+ cpu_freq = mult_frac(in_freq, mul, cpu_div);
+
+ onecell = kzalloc(sizeof(*onecell) +
+ (BOSTON_CLK_COUNT * sizeof(struct clk_hw *)),
+ GFP_KERNEL);
+ if (!onecell)
+ return;
+
+ onecell->num = BOSTON_CLK_COUNT;
+
+ hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
+ return;
+ }
+ onecell->hws[BOSTON_CLK_INPUT] = hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
+ return;
+ }
+ onecell->hws[BOSTON_CLK_SYS] = hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
+ return;
+ }
+ onecell->hws[BOSTON_CLK_CPU] = hw;
+
+ err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
+ if (err)
+ pr_err("failed to add DT provider: %d\n", err);
+}
+
+/*
+ * Use CLK_OF_DECLARE so that this driver is probed early enough to provide the
+ * CPU frequency for use with the GIC or cop0 counters/timers.
+ */
+CLK_OF_DECLARE(clk_boston, "img,boston-clock", clk_boston_setup);
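For reference, ext_field() above uses ffs() to turn a mask into a shift, and
mult_frac() to scale the input rate without intermediate overflow. The same
arithmetic can be sketched in standalone userspace C; the register value
0x01080a02 is an invented example (not read from real Boston hardware) and
mult_frac here is a simplified stand-in for the kernel macro:

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    #define MMCMDIV_CLK0DIV (0xffu << 0)
    #define MMCMDIV_INPUT   (0xffu << 8)
    #define MMCMDIV_MUL     (0xffu << 16)
    #define MMCMDIV_CLK1DIV (0xffu << 24)

    /* Same idea as the driver's ext_field(): shift the masked field down. */
    static unsigned int ext_field(unsigned int val, unsigned int mask)
    {
        return (val & mask) >> (ffs(mask) - 1);
    }

    /* Simplified stand-in for the kernel's overflow-avoiding mult_frac(). */
    static unsigned long mult_frac(unsigned long x, unsigned long n,
                                   unsigned long d)
    {
        return (x / d) * n + (x % d) * n / d;
    }

    int main(void)
    {
        unsigned int mmcmdiv = 0x01080a02;  /* hypothetical register value */
        unsigned long in = ext_field(mmcmdiv, MMCMDIV_INPUT) * 1000000ul;
        unsigned int mul = ext_field(mmcmdiv, MMCMDIV_MUL);

        printf("input %lu Hz\n", in);
        printf("sys   %lu Hz\n",
               mult_frac(in, mul, ext_field(mmcmdiv, MMCMDIV_CLK0DIV)));
        printf("cpu   %lu Hz\n",
               mult_frac(in, mul, ext_field(mmcmdiv, MMCMDIV_CLK1DIV)));
        return 0;
    }

With these invented field values (input 10 MHz, multiplier 8, dividers 2 and
1) the sketch prints 40 MHz for "sys" and 80 MHz for "cpu".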
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 418042201e6d..ea6d62547b10 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -540,7 +540,7 @@ static void bL_cpufreq_ready(struct cpufreq_policy *policy)
&power_coefficient);
cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
- policy->related_cpus, power_coefficient, NULL);
+ policy, power_coefficient, NULL);
if (IS_ERR(cdev[cur_cluster])) {
dev_err(cpu_dev,
"running cpufreq without cooling device: %ld\n",
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index c943787d761e..fef3c2160691 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -326,7 +326,7 @@ static void cpufreq_ready(struct cpufreq_policy *policy)
&power_coefficient);
priv->cdev = of_cpufreq_power_cooling_register(np,
- policy->related_cpus, power_coefficient, NULL);
+ policy, power_coefficient, NULL);
if (IS_ERR(priv->cdev)) {
dev_err(priv->cpu_dev,
"running cpufreq without cooling device: %ld\n",
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d362739a71f3..e75880eb037d 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -170,11 +170,10 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
unsigned int i = 0, count = 0, ret = -ENOMEM;
struct cpufreq_stats *stats;
unsigned int alloc_size;
- struct cpufreq_frequency_table *pos, *table;
+ struct cpufreq_frequency_table *pos;
- /* We need cpufreq table for creating stats table */
- table = policy->freq_table;
- if (unlikely(!table))
+ count = cpufreq_table_count_valid_entries(policy);
+ if (!count)
return;
/* stats already initialized */
@@ -185,10 +184,6 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
if (!stats)
return;
- /* Find total allocation size */
- cpufreq_for_each_valid_entry(pos, table)
- count++;
-
alloc_size = count * sizeof(int) + count * sizeof(u64);
alloc_size += count * count * sizeof(int);
@@ -205,7 +200,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->max_state = count;
/* Find valid-unique entries */
- cpufreq_for_each_valid_entry(pos, table)
+ cpufreq_for_each_valid_entry(pos, policy->freq_table)
if (freq_table_get_index(stats, pos->frequency) == -1)
stats->freq_table[i++] = pos->frequency;
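cpufreq_table_count_valid_entries() replaces the open-coded
cpufreq_for_each_valid_entry() walk above. A rough userspace model of what
such a helper does, assuming the kernel's sentinel values for invalid and
end-of-table entries:

    #include <stdio.h>

    /* Sentinel values as defined in include/linux/cpufreq.h. */
    #define CPUFREQ_ENTRY_INVALID   (~0u)
    #define CPUFREQ_TABLE_END       (~1u)

    struct cpufreq_frequency_table {
        unsigned int frequency;     /* kHz, or one of the markers above */
    };

    /* Rough model of cpufreq_table_count_valid_entries(): walk to the
     * end marker, skipping entries flagged invalid. */
    static unsigned int count_valid(const struct cpufreq_frequency_table *t)
    {
        unsigned int count = 0;

        for (; t->frequency != CPUFREQ_TABLE_END; t++)
            if (t->frequency != CPUFREQ_ENTRY_INVALID)
                count++;
        return count;
    }

    int main(void)
    {
        struct cpufreq_frequency_table table[] = {
            { 500000 }, { CPUFREQ_ENTRY_INVALID }, { 1000000 },
            { 1500000 }, { CPUFREQ_TABLE_END },
        };

        printf("%u valid entries\n", count_valid(table));   /* prints 3 */
        return 0;
    }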
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 3575b82210ba..4ee0431579c1 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -43,7 +43,7 @@ static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
{
- cdev = cpufreq_cooling_register(policy->cpus);
+ cdev = cpufreq_cooling_register(policy);
if (IS_ERR(cdev))
pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
else
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d6f323560da3..6cd503525638 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,6 +225,9 @@ struct global_params {
* @vid: Stores VID limits for this CPU
* @pid: Stores PID parameters for this CPU
* @last_sample_time: Last Sample time
+ * @aperf_mperf_shift: Number of clock cycles after which aperf and mperf
+ * are incremented; this shift is applied to the mperf
+ * delta when calculating CPU busy
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {
u64 last_update;
u64 last_sample_time;
+ u64 aperf_mperf_shift;
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
+ int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
void (*update_util)(struct update_util_data *data, u64 time,
@@ -572,7 +577,7 @@ static int min_perf_pct_min(void)
int turbo_pstate = cpu->pstate.turbo_pstate;
return turbo_pstate ?
- DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
+ (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
return val;
}
+static int knl_get_aperf_mperf_shift(void)
+{
+ return 10;
+}
+
static int knl_get_turbo_pstate(void)
{
u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
@@ -1616,7 +1629,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
int32_t busy_frac, boost;
int target, avg_pstate;
- busy_frac = div_fp(sample->mperf, sample->tsc);
+ busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+ sample->tsc);
boost = cpu->iowait_boost;
cpu->iowait_boost >>= 1;
@@ -1675,7 +1689,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
perf_scaled = mul_fp(perf_scaled, sample_ratio);
} else {
- sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
+ cpu->sample.tsc);
if (sample_ratio < int_tofp(1))
perf_scaled = 0;
}
@@ -1807,6 +1822,7 @@ static const struct pstate_funcs knl_funcs = {
.get_max_physical = core_get_max_pstate_physical,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
+ .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
.update_util = intel_pstate_update_util_pid,
@@ -2403,6 +2419,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.update_util = funcs->update_util;
+ pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
intel_pstate_use_acpi_profile();
}
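The new shift exists because on Knights Landing the MPERF counter ticks only
once every 2^10 TSC cycles, so the raw mperf delta understates busyness by a
factor of 1024. A standalone sketch of the rescaling, with invented sample
deltas:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Invented deltas over one sampling window. */
        uint64_t tsc_delta   = 1000000;
        uint64_t mperf_delta = 700;   /* ticks once per 1024 cycles */
        unsigned int shift   = 10;    /* knl_get_aperf_mperf_shift() */

        /* Without the shift the core looks nearly idle... */
        printf("unshifted busy: %.4f\n", (double)mperf_delta / tsc_delta);
        /* ...with it, mperf is rescaled into TSC units first. */
        printf("shifted busy:   %.4f\n",
               (double)(mperf_delta << shift) / tsc_delta);
        return 0;
    }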
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index fd1886faf33a..f9f00fb4bc3a 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -320,9 +320,7 @@ static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
info->cdev = of_cpufreq_power_cooling_register(np,
- policy->related_cpus,
- capacitance,
- NULL);
+ policy, capacitance, NULL);
if (IS_ERR(info->cdev)) {
dev_err(info->cpu_dev,
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index e2ea433a5f9c..4ada55b8856e 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -278,8 +278,7 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
if (of_find_property(np, "#cooling-cells", NULL)) {
- cpud->cdev = of_cpufreq_cooling_register(np,
- policy->related_cpus);
+ cpud->cdev = of_cpufreq_cooling_register(np, policy);
if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
pr_err("cpu%d is not running as cooling device: %ld\n",
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index a9482023d7d3..dad4e5bad827 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1204,7 +1204,9 @@ static int atmel_sha_finup(struct ahash_request *req)
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index fde399c88779..0488b7f81dcf 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -882,10 +882,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -904,6 +904,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif
ablkcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -914,10 +922,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -935,6 +943,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif
ablkcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ ivsize, 0);
+
kfree(edesc);
ablkcipher_request_complete(req, err);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 7c44c90ad593..910ec61cae09 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -396,7 +396,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 1bb2816a9b4d..c425d4adaf2a 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -149,7 +149,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
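wait_for_completion() (rather than the interruptible variant) matters in both
hunks above because split_key_done() writes into a result structure on the
caller's stack; if the wait were abandoned on a signal, the late callback
would scribble over a dead stack frame. A hedged userspace model of the
pattern using pthreads (names and timings are invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* The "job ring" callback fills a result object that lives on the
     * submitter's stack, then completes it. */
    struct split_key_result {
        int err;
        int done;
        pthread_mutex_t lock;
        pthread_cond_t cond;
    };

    static void *job_ring(void *arg)
    {
        struct split_key_result *res = arg;

        usleep(10000);                   /* pretend hardware latency */
        pthread_mutex_lock(&res->lock);
        res->err = 0;                    /* callback writes the result... */
        res->done = 1;
        pthread_cond_signal(&res->cond); /* ...and "completes" it */
        pthread_mutex_unlock(&res->lock);
        return NULL;
    }

    int main(void)
    {
        struct split_key_result result = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .cond = PTHREAD_COND_INITIALIZER,
        };
        pthread_t thr;

        pthread_create(&thr, NULL, job_ring, &result);

        /* The uninterruptible analogue: do not return before the
         * callback has run, or it writes into a dead stack frame. */
        pthread_mutex_lock(&result.lock);
        while (!result.done)
            pthread_cond_wait(&result.cond, &result.lock);
        pthread_mutex_unlock(&result.lock);

        printf("err = %d\n", result.err);
        pthread_join(thr, NULL);
        return 0;
    }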
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1b220f3ed017..df21d996db7e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -222,17 +222,17 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
return -EINPROGRESS;
}
-int cvm_encrypt(struct ablkcipher_request *req)
+static int cvm_encrypt(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, true);
}
-int cvm_decrypt(struct ablkcipher_request *req)
+static int cvm_decrypt(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, false);
}
-int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
@@ -336,7 +336,7 @@ static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return cvm_setkey(cipher, key, keylen, DES3_ECB);
}
-int cvm_enc_dec_init(struct crypto_tfm *tfm)
+static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index cfc723a10610..0e8160701833 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -898,26 +898,20 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
u8 *key;
unsigned int keylen;
- cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ cipher = ablkctx->aes_generic;
memcpy(iv, req->info, AES_BLOCK_SIZE);
- if (IS_ERR(cipher)) {
- ret = -ENOMEM;
- goto out;
- }
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
ret = crypto_cipher_setkey(cipher, key, keylen);
if (ret)
- goto out1;
+ goto out;
crypto_cipher_encrypt_one(cipher, iv, iv);
for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
crypto_cipher_decrypt_one(cipher, iv, iv);
-out1:
- crypto_free_cipher(cipher);
out:
return ret;
}
@@ -1261,6 +1255,17 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
+
+ if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
+ /* To update tweak*/
+ ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(ablkctx->aes_generic)) {
+ pr_err("failed to allocate aes cipher for tweak\n");
+ return PTR_ERR(ablkctx->aes_generic);
+ }
+ } else
+ ablkctx->aes_generic = NULL;
+
tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
@@ -1291,6 +1296,8 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
crypto_free_skcipher(ablkctx->sw_cipher);
+ if (ablkctx->aes_generic)
+ crypto_free_cipher(ablkctx->aes_generic);
}
static int get_alg_config(struct algo_param *params,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index a4f95b014b46..30af1ee17b87 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -155,6 +155,7 @@
struct ablk_ctx {
struct crypto_skcipher *sw_cipher;
+ struct crypto_cipher *aes_generic;
__be32 key_ctx_hdr;
unsigned int enckey_len;
unsigned char ciph_mode;
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
index fdcd9769ffde..688b051750bd 100644
--- a/drivers/dax/device-dax.h
+++ b/drivers/dax/device-dax.h
@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
int region_id, struct resource *res, unsigned int align,
void *addr, unsigned long flags);
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- struct resource *res, int count);
+ int id, struct resource *res, int count);
#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 12943d19bfc4..e9f3b3e4bbf4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
struct dax_region *dax_region = dev_dax->region;
struct dax_device *dax_dev = dev_dax->dax_dev;
- ida_simple_remove(&dax_region->ida, dev_dax->id);
+ if (dev_dax->id >= 0)
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
dax_region_put(dax_region);
put_dax(dax_dev);
kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
}
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- struct resource *res, int count)
+ int id, struct resource *res, int count)
{
struct device *parent = dax_region->dev;
struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
struct inode *inode;
struct device *dev;
struct cdev *cdev;
- int rc = 0, i;
+ int rc, i;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
if (i < count)
goto err_id;
- dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
- if (dev_dax->id < 0) {
- rc = dev_dax->id;
- goto err_id;
+ if (id < 0) {
+ id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+ dev_dax->id = id;
+ if (id < 0) {
+ rc = id;
+ goto err_id;
+ }
+ } else {
+ /* region provider owns @id lifetime */
+ dev_dax->id = -1;
}
/*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
* device outside of mmap of the resulting character device.
*/
dax_dev = alloc_dax(dev_dax, NULL, NULL);
- if (!dax_dev)
+ if (!dax_dev) {
+ rc = -ENOMEM;
goto err_dax;
+ }
/* from here on we're committed to teardown via dax_dev_release() */
dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
dev->parent = parent;
dev->groups = dax_attribute_groups;
dev->release = dev_dax_release;
- dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+ dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = cdev_device_add(cdev, dev);
if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
return dev_dax;
err_dax:
- ida_simple_remove(&dax_region->ida, dev_dax->id);
+ if (dev_dax->id >= 0)
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
err_id:
kfree(dev_dax);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9f2a0b4fd801..8d8c852ba8f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
static int dax_pmem_probe(struct device *dev)
{
- int rc;
void *addr;
struct resource res;
+ int rc, id, region_id;
struct nd_pfn_sb *pfn_sb;
struct dev_dax *dev_dax;
struct dax_pmem *dax_pmem;
- struct nd_region *nd_region;
struct nd_namespace_io *nsio;
struct dax_region *dax_region;
struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
/* adjust the dax_region resource to the start of data */
res.start += le64_to_cpu(pfn_sb->dataoff);
- nd_region = to_nd_region(dev->parent);
- dax_region = alloc_dax_region(dev, nd_region->id, &res,
+ rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+ if (rc != 2)
+ return -EINVAL;
+
+ dax_region = alloc_dax_region(dev, region_id, &res,
le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
if (!dax_region)
return -ENOMEM;
/* TODO: support for subdividing a dax region... */
- dev_dax = devm_create_dev_dax(dax_region, &res, 1);
+ dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
/* child dev_dax instances now own the lifetime of the dax_region */
dax_region_put(dax_region);
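A minimal standalone illustration of the sscanf() parsing above, deriving the
region id and device id from a hypothetical namespace device name so that the
resulting dax device name matches it:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical namespace device name, as from dev_name(). */
        const char *name = "namespace3.0";
        int region_id, id;

        if (sscanf(name, "namespace%d.%d", &region_id, &id) != 2) {
            fprintf(stderr, "unexpected name: %s\n", name);
            return 1;
        }
        printf("region %d, id %d -> dax%d.%d\n", region_id, id,
               region_id, id);
        return 0;
    }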
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 176976068bcd..77028c27593c 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -86,7 +86,7 @@ static struct attribute *dev_entries[] = {
&dev_attr_set_freq.attr,
NULL,
};
-static struct attribute_group dev_attr_group = {
+static const struct attribute_group dev_attr_group = {
.name = "userspace",
.attrs = dev_entries,
};
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 40a2499730fc..1b89ebbad02c 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -336,8 +336,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "Cannot get the dmc interrupt resource\n");
- return -EINVAL;
+ dev_err(&pdev->dev,
+ "Cannot get the dmc interrupt resource: %d\n", irq);
+ return irq;
}
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 214fff96fa4a..ae712159246f 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -688,9 +688,9 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- return -ENODEV;
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
}
platform_set_drvdata(pdev, tegra);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index f7425960f6a5..37e24f525162 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -17,6 +17,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+ -D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector)
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 99f9a4beb859..67fe19e5a9c6 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -161,7 +161,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
int ret;
if (!panel)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
GFP_KERNEL);
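The sign matters because the kernel encodes errors in the top page of the
address space: ERR_PTR(EINVAL) produces the harmless-looking pointer 22,
which IS_ERR() does not recognize. A simplified userspace model of the
include/linux/err.h helpers (MAX_ERRNO and the encoding follow the kernel;
the rest is a sketch):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define EINVAL    22

    /* Userspace model of the kernel's ERR_PTR()/IS_ERR() helpers. */
    static void *ERR_PTR(long error)
    {
        return (void *)error;
    }

    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        /* Only the negative errno lands in the reserved top page of
         * the address space; the positive one looks like address 22. */
        printf("ERR_PTR(EINVAL):  IS_ERR=%d\n", IS_ERR(ERR_PTR(EINVAL)));
        printf("ERR_PTR(-EINVAL): IS_ERR=%d\n", IS_ERR(ERR_PTR(-EINVAL)));
        return 0;
    }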
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index ec1ed94b2390..d34e5096887a 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drmP.h>
@@ -140,101 +141,83 @@ static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
}
-static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- size_t bytes_pending, num_bytes_processed = 0;
- struct drm_dp_aux_dev *aux_dev = file->private_data;
+ struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
+ loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
- bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset));
-
- if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) {
- res = -EFAULT;
- goto out;
- }
+ iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
- while (bytes_pending > 0) {
- uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
- ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+ while (iov_iter_count(to)) {
+ uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
+ ssize_t todo = min(iov_iter_count(to), sizeof(buf));
if (signal_pending(current)) {
- res = num_bytes_processed ?
- num_bytes_processed : -ERESTARTSYS;
- goto out;
+ res = -ERESTARTSYS;
+ break;
}
- res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
- if (res <= 0) {
- res = num_bytes_processed ? num_bytes_processed : res;
- goto out;
- }
- if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) {
- res = num_bytes_processed ?
- num_bytes_processed : -EFAULT;
- goto out;
+ res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+ if (res <= 0)
+ break;
+
+ if (copy_to_iter(buf, res, to) != res) {
+ res = -EFAULT;
+ break;
}
- bytes_pending -= res;
- *offset += res;
- num_bytes_processed += res;
- res = num_bytes_processed;
+
+ pos += res;
}
-out:
+ if (pos != iocb->ki_pos)
+ res = pos - iocb->ki_pos;
+ iocb->ki_pos = pos;
+
atomic_dec(&aux_dev->usecount);
wake_up_atomic_t(&aux_dev->usecount);
return res;
}
-static ssize_t auxdev_write(struct file *file, const char __user *buf,
- size_t count, loff_t *offset)
+static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- size_t bytes_pending, num_bytes_processed = 0;
- struct drm_dp_aux_dev *aux_dev = file->private_data;
+ struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
+ loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
- bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset);
-
- if (!access_ok(VERIFY_READ, buf, bytes_pending)) {
- res = -EFAULT;
- goto out;
- }
+ iov_iter_truncate(from, AUX_MAX_OFFSET - pos);
- while (bytes_pending > 0) {
- uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
- ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+ while (iov_iter_count(from)) {
+ uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
+ ssize_t todo = min(iov_iter_count(from), sizeof(buf));
if (signal_pending(current)) {
- res = num_bytes_processed ?
- num_bytes_processed : -ERESTARTSYS;
- goto out;
+ res = -ERESTARTSYS;
+ break;
}
- if (__copy_from_user(localbuf,
- buf + num_bytes_processed, todo)) {
- res = num_bytes_processed ?
- num_bytes_processed : -EFAULT;
- goto out;
+ if (!copy_from_iter_full(buf, todo, from)) {
+ res = -EFAULT;
+ break;
}
- res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo);
- if (res <= 0) {
- res = num_bytes_processed ? num_bytes_processed : res;
- goto out;
- }
- bytes_pending -= res;
- *offset += res;
- num_bytes_processed += res;
- res = num_bytes_processed;
+ res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+ if (res <= 0)
+ break;
+
+ pos += res;
}
-out:
+ if (pos != iocb->ki_pos)
+ res = pos - iocb->ki_pos;
+ iocb->ki_pos = pos;
+
atomic_dec(&aux_dev->usecount);
wake_up_atomic_t(&aux_dev->usecount);
return res;
@@ -251,8 +234,8 @@ static int auxdev_release(struct inode *inode, struct file *file)
static const struct file_operations auxdev_fops = {
.owner = THIS_MODULE,
.llseek = auxdev_llseek,
- .read = auxdev_read,
- .write = auxdev_write,
+ .read_iter = auxdev_read_iter,
+ .write_iter = auxdev_write_iter,
.open = auxdev_open,
.release = auxdev_release,
};
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index fc8ef42203ec..b3ef4f1c2630 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -832,6 +832,7 @@ unlock:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 0b2d8c4a2fa5..d1f202852028 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -112,6 +112,9 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
v32.version_major = v.version_major;
v32.version_minor = v.version_minor;
v32.version_patchlevel = v.version_patchlevel;
+ v32.name_len = v.name_len;
+ v32.date_len = v.date_len;
+ v32.desc_len = v.desc_len;
if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
return -EFAULT;
return 0;
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 463e4d81fb0d..e9f33cd805dd 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -242,7 +242,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* Otherwise reinitialize delayed at next vblank interrupt and assign 0
* for now, to mark the vblanktimestamp as invalid.
*/
- if (!rc && in_vblank_irq)
+ if (!rc && !in_vblank_irq)
t_vblank = (struct timeval) {0, 0};
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 51241de5e7a7..713848c36349 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = workload;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0261fcc5b50..2deb05f618fb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
+
+ /* Clear host CRT status, so the guest cannot detect the host CRT. */
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 66374dba3b1a..6166e34d892b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ __free_page(gvt->gtt.scratch_ggtt_page);
return ret;
}
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1414d7e6148d..17febe830ff6 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- *(u32 *)p_data = (1 << 17);
- return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- *(u32 *)p_data = 3;
- return 0;
-}
+ switch (offset) {
+ case 0xe651c:
+ case 0xe661c:
+ case 0xe671c:
+ case 0xe681c:
+ vgpu_vreg(vgpu, offset) = 1 << 17;
+ break;
+ case 0xe6c04:
+ vgpu_vreg(vgpu, offset) = 0x3;
+ break;
+ case 0xe6e1c:
+ vgpu_vreg(vgpu, offset) = 0x2f << 16;
+ break;
+ default:
+ return -EINVAL;
+ }
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- *(u32 *)p_data = (0x2f << 16);
+ read_vreg(vgpu, offset, p_data, bytes);
return 0;
}
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+ MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
- MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ae0b4083ce1..fd0c85f9ef3c 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
struct device *dev = mdev_dev(vgpu->vdev.mdev);
unsigned long gfn;
- mutex_lock(&vgpu->vdev.cache_lock);
- while ((node = rb_first(&vgpu->vdev.cache))) {
+ for (;;) {
+ mutex_lock(&vgpu->vdev.cache_lock);
+ node = rb_first(&vgpu->vdev.cache);
+ if (!node) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ break;
+ }
dma = rb_entry(node, struct gvt_dma, node);
gvt_dma_unmap_iova(vgpu, dma->iova);
gfn = dma->gfn;
-
- vfio_unpin_pages(dev, &gfn, 1);
__gvt_cache_remove_entry(vgpu, dma);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ vfio_unpin_pages(dev, &gfn, 1);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
}
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
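The rewritten loop above re-takes cache_lock on every iteration so that
vfio_unpin_pages(), which may sleep or take other locks, is never called with
the cache lock held. A userspace sketch of the same drop-the-lock-per-
iteration shape (the cache contents and the unpin() stand-in are invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static int cache[] = { 1, 2, 3, 4 };
    static int entries = 4;

    /* Stand-in for vfio_unpin_pages(): may block or take other locks,
     * so it must not run with cache_lock held. */
    static void unpin(int gfn)
    {
        printf("unpin gfn %d (lock dropped)\n", gfn);
    }

    int main(void)
    {
        for (;;) {
            int gfn;

            pthread_mutex_lock(&cache_lock);
            if (!entries) {
                pthread_mutex_unlock(&cache_lock);
                break;
            }
            gfn = cache[--entries];          /* remove under the lock */
            pthread_mutex_unlock(&cache_lock);

            unpin(gfn);                      /* sleepable work, unlocked */
        }
        return 0;
    }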
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 488fdea348a9..4f7057d62d88 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- /* If the status is -EINPROGRESS means this workload
- * doesn't meet any issue during dispatching so when
- * get the SCHEDULE_OUT set the status to be zero for
- * good. If the status is NOT -EINPROGRESS means there
- * is something wrong happened during dispatching and
- * the status should not be set to zero
- */
- if (workload->status == -EINPROGRESS)
- workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
+ /* If this request caused a GPU hang, req->fence.error will
+ * be set to -EIO. Propagate -EIO to the workload status so
+ * that a request which hung the GPU does not trigger a
+ * context switch interrupt to the guest.
+ */
+ if (likely(workload->status == -EINPROGRESS)) {
+ if (workload->req->fence.error == -EIO)
+ workload->status = -EIO;
+ else
+ workload->status = 0;
+ }
+
i915_gem_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !vgpu->resetting) {
@@ -464,8 +467,6 @@ struct workload_thread_param {
int ring_id;
};
-static DEFINE_MUTEX(scheduler_mutex);
-
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
if (!workload)
break;
- mutex_lock(&scheduler_mutex);
-
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
@@ -537,9 +536,6 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
-
- mutex_unlock(&scheduler_mutex);
-
}
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3f44076ec8a0..00d8967c8512 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3087,7 +3087,7 @@ static void intel_connector_info(struct seq_file *m,
connector->display_info.cea_rev);
}
- if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ if (!intel_encoder)
return;
switch (connector->connector_type) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ee2325b180e7..fc307e03943c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1132,10 +1132,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
- * be lost or delayed, but we use them anyways to avoid
- * stuck interrupts on some machines.
+ * be lost or delayed, and MSI support was defeatured. MSI interrupts seem to
+ * get lost on g4x as well, and interrupt delivery seems to stay
+ * properly dead afterwards. So we'll just disable them for all
+ * pre-gen5 chipsets.
*/
- if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7dcac3bfb771..969bac8404f1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2434,8 +2434,9 @@ rebuild_st:
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
- * this we want the future __GFP_MAYFAIL.
+ * this we want __GFP_RETRY_MAYFAIL.
*/
+ gfp |= __GFP_RETRY_MAYFAIL;
}
} while (1);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9337446f1068..054b2e54cdaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -288,20 +288,26 @@ static int eb_create(struct i915_execbuffer *eb)
* direct lookup.
*/
do {
+ unsigned int flags;
+
+ /* While we can still reduce the allocation size, suppress
+ * the warning and allow the allocation to fail. On the
+ * last pass, though, we want to try as hard as possible
+ * to perform the allocation and warn if it fails.
+ */
+ flags = GFP_TEMPORARY;
+ if (size > 1)
+ flags |= __GFP_NORETRY | __GFP_NOWARN;
+
eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
- GFP_TEMPORARY |
- __GFP_NORETRY |
- __GFP_NOWARN);
+ flags);
if (eb->buckets)
break;
} while (--size);
- if (unlikely(!eb->buckets)) {
- eb->buckets = kzalloc(sizeof(struct hlist_head),
- GFP_TEMPORARY);
- if (unlikely(!eb->buckets))
- return -ENOMEM;
- }
+ if (unlikely(!size))
+ return -ENOMEM;
eb->lut_size = size;
} else {
@@ -452,7 +458,7 @@ eb_add_vma(struct i915_execbuffer *eb,
return err;
}
- if (eb->lut_size >= 0) {
+ if (eb->lut_size > 0) {
vma->exec_handle = entry->handle;
hlist_add_head(&vma->exec_node,
&eb->buckets[hash_32(entry->handle,
@@ -894,7 +900,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
eb_release_vmas(eb);
- if (eb->lut_size >= 0)
+ if (eb->lut_size > 0)
memset(eb->buckets, 0,
sizeof(struct hlist_head) << eb->lut_size);
}
@@ -903,7 +909,7 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
- if (eb->lut_size >= 0)
+ if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -2180,8 +2186,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
}
- if (eb_create(&eb))
- return -ENOMEM;
+ err = eb_create(&eb);
+ if (err)
+ goto err_out_fence;
+
+ GEM_BUG_ON(!eb.lut_size);
/*
* Take a local wakeref for preparing to dispatch the execbuf as
@@ -2340,6 +2349,7 @@ err_unlock:
err_rpm:
intel_runtime_pm_put(eb.i915);
eb_destroy(&eb);
+err_out_fence:
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
err_in_fence:
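eb_create() now shrinks the hash-table allocation on failure and only gives
up once even the single-bucket table cannot be allocated; on all but the last
pass it also passes __GFP_NORETRY | __GFP_NOWARN (not modelled below). A
standalone model of the shrinking loop, with a fake allocator that fails
above an arbitrary threshold:

    #include <stdio.h>
    #include <stdlib.h>

    /* Fake allocator that fails above an arbitrary threshold, just to
     * exercise the shrinking loop. */
    static void *try_alloc(size_t bytes)
    {
        return bytes > 4096 ? NULL : malloc(bytes);
    }

    int main(void)
    {
        unsigned int size = 16;    /* log2 of the bucket count */
        void *buckets;

        do {
            buckets = try_alloc(sizeof(void *) << size);
            if (buckets)
                break;
        } while (--size);

        if (!size) {               /* even one bucket failed: -ENOMEM */
            fprintf(stderr, "out of memory\n");
            return 1;
        }

        printf("allocated %zu bytes (size=%u)\n",
               sizeof(void *) << size, size);
        free(buckets);
        return 0;
    }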
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 38c44407bafc..9cd22f83b0cf 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2067,10 +2067,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return ret;
}
- ret = alloc_oa_buffer(dev_priv);
- if (ret)
- goto err_oa_buf_alloc;
-
/* PRM - observability performance counters:
*
* OACONTROL, performance counter enable, note:
@@ -2086,6 +2082,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ ret = alloc_oa_buffer(dev_priv);
+ if (ret)
+ goto err_oa_buf_alloc;
+
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
if (ret)
goto err_enable;
@@ -2097,11 +2097,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
if (stream->ctx)
oa_put_render_ctx_id(stream);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c8647cfa81ba..64cc674b652a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1802,7 +1802,7 @@ enum skl_disp_power_wells {
#define POST_CURSOR_2(x) ((x) << 6)
#define POST_CURSOR_2_MASK (0x3F << 6)
#define CURSOR_COEFF(x) ((x) << 0)
-#define CURSOR_COEFF_MASK (0x3F << 6)
+#define CURSOR_COEFF_MASK (0x3F << 0)
#define _CNL_PORT_TX_DW5_GRP_AE 0x162354
#define _CNL_PORT_TX_DW5_GRP_B 0x1623D4
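The one-line mask fix above matters whenever a driver clears the field before
writing CURSOR_COEFF(x): with the typo'd shift it would clear POST_CURSOR_2's
bits instead of the coefficient's. A small standalone check (the register
value is invented):

    #include <stdio.h>

    #define OLD_MASK (0x3F << 6)   /* the typo'd CURSOR_COEFF_MASK */
    #define NEW_MASK (0x3F << 0)   /* the corrected mask */

    int main(void)
    {
        unsigned int reg = 0xFFF;  /* invented register contents */

        /* Clearing the field before writing CURSOR_COEFF(x): the old
         * mask wiped POST_CURSOR_2's bits, not the coefficient's. */
        printf("old clear: 0x%03x\n", reg & ~OLD_MASK);   /* 0x03f */
        printf("new clear: 0x%03x\n", reg & ~NEW_MASK);   /* 0xfc0 */
        return 0;
    }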
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b8914db7d2e1..1241e5891b29 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
int cdclk = cdclk_state->cdclk;
u32 val, cmd;
+ /* There are cases where we can end up here with power domains
+ * off and a CDCLK frequency other than the minimum, like when
+ * issuing a modeset without actually changing any display after
+ * a system suspend. So grab the PIPE-A domain, which covers
+ * the HW blocks needed for the following programming.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266667)
@@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
intel_update_cdclk(dev_priv);
vlv_program_pfi_credits(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
+ /* There are cases where we can end up here with power domains
+ * off and a CDCLK frequency other than the minimum, like when
+ * issuing a modeset without actually changing any display after
+ * a system suspend. So grab the PIPE-A domain, which covers
+ * the HW blocks needed for the following programming.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
/*
* Specs are full of misinformation, but testing on actual
* hardware has shown that we just need to write the desired
@@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
intel_update_cdclk(dev_priv);
vlv_program_pfi_credits(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static int bdw_calc_cdclk(int max_pixclk)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a4487c5b7e37..5b4de719bec3 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -821,9 +821,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ /* WaDisableKillLogic:bxt,skl,kbl */
+ if (!IS_COFFEELAKE(dev_priv))
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
@@ -894,10 +895,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
- /* WaDisableHDCInvalidation:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
+ /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(dev_priv) ||
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 03347c6ae599..0c4cde6b2e6f 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -535,13 +535,14 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
- if (ifbdev->fb) {
+ if (ifbdev->vma) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
intel_unpin_fb_vma(ifbdev->vma);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+ }
+ if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
- }
kfree(ifbdev);
}
@@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
- if (!ifbdev || !ifbdev->fb)
+ if (!ifbdev || !ifbdev->vma)
return;
info = ifbdev->helper.fbdev;
@@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev && ifbdev->fb)
+ if (ifbdev && ifbdev->vma)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
@@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
return;
intel_fbdev_sync(ifbdev);
- if (!ifbdev->fb)
+ if (!ifbdev->vma)
return;
if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index d15cc9d3a5cd..89dc25a5a53b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -246,9 +246,9 @@ static int igt_dmabuf_export_vmap(void *arg)
i915_gem_object_put(obj);
ptr = dma_buf_vmap(dmabuf);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- pr_err("dma_buf_vmap failed with err=%d\n", err);
+ if (!ptr) {
+ pr_err("dma_buf_vmap failed\n");
+ err = -ENOMEM;
goto out;
}
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index bf2e5be1ab30..e37b55a23a65 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,4 +1,5 @@
-mediatek-drm-y := mtk_disp_ovl.o \
+mediatek-drm-y := mtk_disp_color.o \
+ mtk_disp_ovl.o \
mtk_disp_rdma.o \
mtk_drm_crtc.o \
mtk_drm_ddp.o \
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
new file mode 100644
index 000000000000..ef79a6d55646
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_COLOR_CFG_MAIN 0x0400
+#define DISP_COLOR_START_MT2701 0x0f00
+#define DISP_COLOR_START_MT8173 0x0c00
+#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
+#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
+#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
+
+#define COLOR_BYPASS_ALL BIT(7)
+#define COLOR_SEQ_SEL BIT(13)
+
+struct mtk_disp_color_data {
+ unsigned int color_offset;
+};
+
+/**
+ * struct mtk_disp_color - DISP_COLOR driver structure
+ * @ddp_comp: structure containing type enum and hardware resources
+ * @crtc: associated crtc to report irq events to
+ */
+struct mtk_disp_color {
+ struct mtk_ddp_comp ddp_comp;
+ struct drm_crtc *crtc;
+ const struct mtk_disp_color_data *data;
+};
+
+static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_color, ddp_comp);
+}
+
+static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(w, comp->regs + DISP_COLOR_WIDTH(color));
+ writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+}
+
+static void mtk_color_start(struct mtk_ddp_comp *comp)
+{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+ comp->regs + DISP_COLOR_CFG_MAIN);
+ writel(0x1, comp->regs + DISP_COLOR_START(color));
+}
+
+static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = {
+ .config = mtk_color_config,
+ .start = mtk_color_start,
+};
+
+static int mtk_disp_color_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_color *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component %s: %d\n",
+ dev->of_node->full_name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_disp_color_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_color *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
+}
+
+static const struct component_ops mtk_disp_color_component_ops = {
+ .bind = mtk_disp_color_bind,
+ .unbind = mtk_disp_color_unbind,
+};
+
+static int mtk_disp_color_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_color *priv;
+ int comp_id;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+ return comp_id;
+ }
+
+ ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
+ &mtk_disp_color_funcs);
+ if (ret) {
+ dev_err(dev, "Failed to initialize component: %d\n", ret);
+ return ret;
+ }
+
+ priv->data = of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_color_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_color_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_color_data mt2701_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT2701,
+};
+
+static const struct mtk_disp_color_data mt8173_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT8173,
+};
+
+static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = &mt2701_color_driver_data},
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = &mt8173_color_driver_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
+
+struct platform_driver mtk_disp_color_driver = {
+ .probe = mtk_disp_color_probe,
+ .remove = mtk_disp_color_remove,
+ .driver = {
+ .name = "mediatek-disp-color",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_color_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index a14d7d64d7b1..35bc5babdbf7 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -42,9 +42,12 @@
#define OVL_RDMA_MEM_GMC 0x40402020
#define OVL_CON_BYTE_SWAP BIT(24)
+#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
+#define OVL_CON_CLRFMT_UYVY (4 << 12)
+#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
@@ -176,6 +179,10 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP;
+ case DRM_FORMAT_UYVY:
+ return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
+ case DRM_FORMAT_YUYV:
+ return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 6582e1f56d37..cb32c9369f3a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -559,6 +559,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
GFP_KERNEL);
+ if (!mtk_crtc->ddp_comp)
+ return -ENOMEM;
mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
if (IS_ERR(mtk_crtc->mutex)) {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 8b52416b6e41..07d7ea2268ef 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -38,13 +38,6 @@
#define DISP_REG_UFO_START 0x0000
-#define DISP_COLOR_CFG_MAIN 0x0400
-#define DISP_COLOR_START_MT2701 0x0f00
-#define DISP_COLOR_START_MT8173 0x0c00
-#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
-#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
-#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
-
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
@@ -55,9 +48,6 @@
#define LUT_10BIT_MASK 0x03ff
-#define COLOR_BYPASS_ALL BIT(7)
-#define COLOR_SEQ_SEL BIT(13)
-
#define OD_RELAYMODE BIT(0)
#define UFO_BYPASS BIT(2)
@@ -82,20 +72,6 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
-struct mtk_disp_color_data {
- unsigned int color_offset;
-};
-
-struct mtk_disp_color {
- struct mtk_ddp_comp ddp_comp;
- const struct mtk_disp_color_data *data;
-};
-
-static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
-{
- return container_of(comp, struct mtk_disp_color, ddp_comp);
-}
-
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
unsigned int CFG)
{
@@ -119,25 +95,6 @@ void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
}
}
-static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
-{
- struct mtk_disp_color *color = comp_to_color(comp);
-
- writel(w, comp->regs + DISP_COLOR_WIDTH(color));
- writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
-}
-
-static void mtk_color_start(struct mtk_ddp_comp *comp)
-{
- struct mtk_disp_color *color = comp_to_color(comp);
-
- writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
- comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START(color));
-}
-
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
@@ -229,11 +186,6 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
.stop = mtk_gamma_stop,
};
-static const struct mtk_ddp_comp_funcs ddp_color = {
- .config = mtk_color_config,
- .start = mtk_color_start,
-};
-
static const struct mtk_ddp_comp_funcs ddp_od = {
.config = mtk_od_config,
.start = mtk_od_start,
@@ -268,8 +220,8 @@ struct mtk_ddp_comp_match {
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
- [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
- [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
@@ -286,22 +238,6 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
-static const struct mtk_disp_color_data mt2701_color_driver_data = {
- .color_offset = DISP_COLOR_START_MT2701,
-};
-
-static const struct mtk_disp_color_data mt8173_color_driver_data = {
- .color_offset = DISP_COLOR_START_MT8173,
-};
-
-static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-color",
- .data = &mt2701_color_driver_data},
- { .compatible = "mediatek,mt8173-disp-color",
- .data = &mt8173_color_driver_data},
- {},
-};
-
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
@@ -324,23 +260,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
- const struct of_device_id *match;
- struct mtk_disp_color *color;
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
type = mtk_ddp_matches[comp_id].type;
- if (type == MTK_DISP_COLOR) {
- devm_kfree(dev, comp);
- color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
- if (!color)
- return -ENOMEM;
-
- match = of_match_node(mtk_disp_color_driver_dt_match, node);
- color->data = match->data;
- comp = &color->ddp_comp;
- }
comp->id = comp_id;
comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f6c8ec4c7dbc..41d2cffe953e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -439,11 +439,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
private->comp_node[comp_id] = of_node_get(node);
/*
- * Currently only the OVL, RDMA, DSI, and DPI blocks have
+ * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have
* separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
- if (comp_type == MTK_DISP_OVL ||
+ if (comp_type == MTK_DISP_COLOR ||
+ comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -566,6 +567,7 @@ static struct platform_driver mtk_drm_platform_driver = {
static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_ddp_driver,
+ &mtk_disp_color_driver,
&mtk_disp_ovl_driver,
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
@@ -576,33 +578,14 @@ static struct platform_driver * const mtk_drm_drivers[] = {
static int __init mtk_drm_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) {
- ret = platform_driver_register(mtk_drm_drivers[i]);
- if (ret < 0) {
- pr_err("Failed to register %s driver: %d\n",
- mtk_drm_drivers[i]->driver.name, ret);
- goto err;
- }
- }
-
- return 0;
-
-err:
- while (--i >= 0)
- platform_driver_unregister(mtk_drm_drivers[i]);
-
- return ret;
+ return platform_register_drivers(mtk_drm_drivers,
+ ARRAY_SIZE(mtk_drm_drivers));
}
static void __exit mtk_drm_exit(void)
{
- int i;
-
- for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--)
- platform_driver_unregister(mtk_drm_drivers[i]);
+ platform_unregister_drivers(mtk_drm_drivers,
+ ARRAY_SIZE(mtk_drm_drivers));
}
module_init(mtk_drm_init);
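
The open-coded register/unwind loop is replaced by platform_register_drivers(), which performs the same unwinding internally. A minimal usage sketch with placeholder drivers:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    extern struct platform_driver my_drv_a;  /* placeholders, defined */
    extern struct platform_driver my_drv_b;  /* elsewhere */

    static struct platform_driver * const my_drivers[] = {
        &my_drv_a,
        &my_drv_b,
    };

    static int __init my_init(void)
    {
        /* registers in order; on failure unregisters those already done */
        return platform_register_drivers(my_drivers,
                                         ARRAY_SIZE(my_drivers));
    }
    module_init(my_init);

    static void __exit my_exit(void)
    {
        /* unregisters in reverse order */
        platform_unregister_drivers(my_drivers, ARRAY_SIZE(my_drivers));
    }
    module_exit(my_exit);
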
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index aef8747d810b..c3378c452c0a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -59,6 +59,7 @@ struct mtk_drm_private {
};
extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_color_driver;
extern struct platform_driver mtk_disp_ovl_driver;
extern struct platform_driver mtk_disp_rdma_driver;
extern struct platform_driver mtk_dpi_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index e405e89ed5e5..1a59b9ab4aa8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -28,6 +28,8 @@ static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
};
static void mtk_plane_reset(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b5cc6e12334c..97253c8f813b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -930,7 +930,7 @@ static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
DRM_INFO("type is 0x02, try again\n");
break;
default:
- DRM_INFO("type(0x%x) cannot be non-recognite\n", type);
+ DRM_INFO("type(0x%x) not recognized\n", type);
break;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0a4ffd724146..71eb4fbbfc85 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1778,33 +1778,14 @@ static struct platform_driver * const mtk_hdmi_drivers[] = {
static int __init mtk_hdmitx_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
- ret = platform_driver_register(mtk_hdmi_drivers[i]);
- if (ret < 0) {
- pr_err("Failed to register %s driver: %d\n",
- mtk_hdmi_drivers[i]->driver.name, ret);
- goto err;
- }
- }
-
- return 0;
-
-err:
- while (--i >= 0)
- platform_driver_unregister(mtk_hdmi_drivers[i]);
-
- return ret;
+ return platform_register_drivers(mtk_hdmi_drivers,
+ ARRAY_SIZE(mtk_hdmi_drivers));
}
static void __exit mtk_hdmitx_exit(void)
{
- int i;
-
- for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
- platform_driver_unregister(mtk_hdmi_drivers[i]);
+ platform_unregister_drivers(mtk_hdmi_drivers,
+ ARRAY_SIZE(mtk_hdmi_drivers));
}
module_init(mtk_hdmitx_init);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index fa4f8f008e4d..e67ed383e11b 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -31,6 +31,7 @@
#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
+#include <linux/dmi.h>
extern int atom_debug;
@@ -2184,9 +2185,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
goto assigned;
}
- /* on DCE32 and encoder can driver any block so just crtc id */
+ /*
+ * On DCE32 any encoder can drive any block, so usually the crtc id is
+ * enough. Apple wires it differently on at least the iMac10,1; there,
+ * select the encoder by linkb, otherwise the internal eDP panel will
+ * stay dark.
+ */
if (ASIC_IS_DCE32(rdev)) {
- enc_idx = radeon_crtc->crtc_id;
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ enc_idx = (dig->linkb) ? 1 : 0;
+ else
+ enc_idx = radeon_crtc->crtc_id;
+
goto assigned;
}
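
A reduced sketch of the DMI quirk pattern used above; only the "iMac10,1" product string comes from the patch, the helper and its arguments are illustrative:

    #include <linux/dmi.h>
    #include <linux/types.h>

    static int pick_encoder_index(bool linkb, int crtc_id)
    {
        /* Apple wires the eDP panel so that the link, not the crtc,
         * must select the encoder on this machine. */
        if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
            return linkb ? 1 : 0;

        return crtc_id;  /* default: any encoder can drive any block */
    }
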
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 14fa1f8351e8..9b0b0588bbed 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1195,7 +1195,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
continue;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!dp)
+ if (!port)
return -ENOMEM;
port->extcon = extcon;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 47905faf5586..c7e96b82cf63 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -45,13 +45,13 @@ struct rockchip_crtc_state {
*
* @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc.
* @num_pipe: number of pipes for this device.
+ * @mm_lock: protects drm_mm against concurrent access from multiple threads.
*/
struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
struct drm_atomic_state *state;
struct iommu_domain *domain;
- /* protect drm_mm on multi-threads */
struct mutex mm_lock;
struct drm_mm mm;
struct list_head psr_list;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index df9e57064f19..b74ac717e56a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -29,12 +29,11 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
ssize_t ret;
mutex_lock(&private->mm_lock);
-
ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
rk_obj->base.size, PAGE_SIZE,
0, 0);
-
mutex_unlock(&private->mm_lock);
+
if (ret < 0) {
DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
return ret;
@@ -56,7 +55,9 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
return 0;
err_remove_node:
+ mutex_lock(&private->mm_lock);
drm_mm_remove_node(&rk_obj->mm);
+ mutex_unlock(&private->mm_lock);
return ret;
}
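
The fix takes mm_lock around drm_mm_remove_node() in the error path, so insertions and removals are serialized by the same mutex. A minimal sketch of that locking rule, with placeholder names:

    #include <linux/mm.h>
    #include <linux/mutex.h>
    #include <drm/drm_mm.h>

    static int iova_alloc(struct drm_mm *mm, struct mutex *lock,
                          struct drm_mm_node *node, u64 size)
    {
        int ret;

        mutex_lock(lock);
        ret = drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0, 0);
        mutex_unlock(lock);
        return ret;
    }

    static void iova_free(struct mutex *lock, struct drm_mm_node *node)
    {
        mutex_lock(lock);  /* the bug was removing without this */
        drm_mm_remove_node(node);
        mutex_unlock(lock);
    }
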
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 41b39464ded8..501e16a9227d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2732,6 +2732,9 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
hidpp_battery_props,
sizeof(hidpp_battery_props),
GFP_KERNEL);
+ if (!battery_props)
+ return -ENOMEM;
+
num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f3e35e7a189d..aff20f4b6d97 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -620,16 +620,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- set_bit(usage->type, hi->input->evbit);
-
- return -1;
-}
-
static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
{
__s32 quirks = td->mtclass.quirks;
@@ -969,8 +959,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD)
- return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+ field->application == HID_DG_TOUCHPAD) {
+ /* We own these mappings, tell hid-input to ignore them */
+ return -1;
+ }
/* let hid-core decide for the others */
return 0;
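
A reduced sketch of the .input_mapped contract relied on above: a negative return tells hid-input to skip its own mapping for the usage, 0 defers to hid-core. Names other than the HID_DG_* constants are placeholders:

    #include <linux/hid.h>

    static int my_input_mapped(struct hid_device *hdev,
                               struct hid_input *hi,
                               struct hid_field *field,
                               struct hid_usage *usage,
                               unsigned long **bit, int *max)
    {
        if (field->application == HID_DG_TOUCHSCREEN ||
            field->application == HID_DG_TOUCHPAD)
            return -1;  /* the driver owns these mappings */

        return 0;       /* let hid-core decide for the rest */
    }
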
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a6cb379a4ebc..01236cef7bfb 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -268,6 +268,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
return ret;
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
@@ -280,6 +281,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
@@ -405,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
fl4.saddr = src_ip;
fl4.flowi4_oif = addr->bound_dev_if;
rt = ip_route_output_key(addr->net, &fl4);
- if (IS_ERR(rt)) {
- ret = PTR_ERR(rt);
- goto out;
- }
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
+
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
@@ -423,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
*prt = rt;
return 0;
-out:
- return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -509,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in,
struct dst_entry *dst;
int ret;
+ if (!addr->net) {
+ pr_warn_ratelimited("%s: missing namespace\n", __func__);
+ return -EINVAL;
+ }
+
if (src_in->sa_family == AF_INET) {
struct rtable *rt = NULL;
const struct sockaddr_in *dst_in4 =
@@ -522,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in,
if (resolve_neigh)
ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
- ndev = rt->dst.dev;
- dev_hold(ndev);
+ if (addr->bound_dev_if) {
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ ndev = rt->dst.dev;
+ dev_hold(ndev);
+ }
ip_rt_put(rt);
} else {
@@ -539,14 +548,27 @@ static int addr_resolve(struct sockaddr *src_in,
if (resolve_neigh)
ret = addr_resolve_neigh(dst, dst_in, addr, seq);
- ndev = dst->dev;
- dev_hold(ndev);
+ if (addr->bound_dev_if) {
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ ndev = dst->dev;
+ dev_hold(ndev);
+ }
dst_release(dst);
}
- addr->bound_dev_if = ndev->ifindex;
- addr->net = dev_net(ndev);
+ if (ndev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip(dst_in, addr, NULL);
+ /*
+ * Put the loopback device and get the translated
+ * device instead.
+ */
+ dev_put(ndev);
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ addr->bound_dev_if = ndev->ifindex;
+ }
dev_put(ndev);
return ret;
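
Both branches above leave exactly one reference held on ndev, so a single dev_put() later remains balanced. A sketch of that invariant; the helper and its arguments are illustrative:

    #include <linux/netdevice.h>

    static struct net_device *grab_ndev(struct net *net, int ifindex,
                                        struct net_device *dst_dev)
    {
        struct net_device *ndev;

        if (ifindex) {
            /* takes its own reference, may return NULL */
            ndev = dev_get_by_index(net, ifindex);
        } else {
            ndev = dst_dev;
            dev_hold(ndev);  /* match the reference manually */
        }
        return ndev;  /* caller releases with a single dev_put() */
    }
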
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 31bb82d8ecd7..11aff923b633 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
ndev = dev_get_by_index(&init_net, bound_if_index);
- if (ndev && ndev->flags & IFF_LOOPBACK) {
- pr_info("detected loopback device\n");
- dev_put(ndev);
-
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- ndev = device->get_netdev(device, port);
- if (!ndev)
- return -ENODEV;
- }
- } else {
+ else
gid_type = IB_GID_TYPE_IB;
- }
+
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -2569,21 +2558,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- if (ndev->flags & IFF_LOOPBACK) {
- dev_put(ndev);
- if (!id_priv->id.device->get_netdev) {
- ret = -EOPNOTSUPP;
- goto err2;
- }
-
- ndev = id_priv->id.device->get_netdev(id_priv->id.device,
- id_priv->id.port_num);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
- }
-
supported_gids = roce_gid_type_mask_support(id_priv->id.device,
id_priv->id.port_num);
gid_type = cma_route_gid_type(addr->dev_addr.network,
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index db958d3207ef..94a9eefb3cfc 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,8 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+static struct workqueue_struct *gid_cache_wq;
+
enum gid_op_type {
GID_DEL = 0,
GID_ADD
@@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
}
INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
- queue_work(ib_wq, &ndev_work->work);
+ queue_work(gid_cache_wq, &ndev_work->work);
return NOTIFY_DONE;
}
@@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
dev_hold(ndev);
work->gid_attr.ndev = ndev;
- queue_work(ib_wq, &work->work);
+ queue_work(gid_cache_wq, &work->work);
return NOTIFY_DONE;
}
@@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = {
int __init roce_gid_mgmt_init(void)
{
+ gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
+ if (!gid_cache_wq)
+ return -ENOMEM;
+
register_inetaddr_notifier(&nb_inetaddr);
if (IS_ENABLED(CONFIG_IPV6))
register_inet6addr_notifier(&nb_inet6addr);
@@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void)
* ib-core is removed, all physical devices have been removed,
* so no issue with remaining hardware contexts.
*/
+ destroy_workqueue(gid_cache_wq);
}
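
A minimal sketch of the private ordered-workqueue pattern introduced here; an ordered queue executes at most one work item at a time, serializing GID cache updates instead of sharing the concurrent ib_wq. Names are placeholders:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;

    static int my_setup(void)
    {
        my_wq = alloc_ordered_workqueue("my-ordered-wq", 0);
        if (!my_wq)
            return -ENOMEM;
        return 0;
    }

    static void my_teardown(void)
    {
        destroy_workqueue(my_wq);  /* drains pending work, then frees */
    }
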
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8ba9bfb073d1..3f55d18a3791 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2005,28 +2005,13 @@ static int modify_qp(struct ib_uverbs_file *file,
rdma_ah_set_port_num(&attr->alt_ah_attr,
cmd->base.alt_dest.port_num);
- if (qp->real_qp == qp) {
- if (cmd->base.attr_mask & IB_QP_AV) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- goto release_qp;
- }
- ret = ib_security_modify_qp(qp,
- attr,
- modify_qp_mask(qp->qp_type,
- cmd->base.attr_mask),
- udata);
- } else {
- ret = ib_security_modify_qp(qp,
- attr,
- modify_qp_mask(qp->qp_type,
- cmd->base.attr_mask),
- NULL);
- }
+ ret = ib_modify_qp_with_udata(qp, attr,
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask),
+ udata);
release_qp:
uobj_put_obj_read(qp);
-
out:
kfree(attr);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c973a83c898b..fb98ed67d5bc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -452,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/*
+ * This function creates an ah from the incoming packet.
+ * The packet's dgid is the GID of the receiving node on which this code
+ * runs, and its sgid is the GID of the sender.
+ *
+ * When resolving the destination's mac address, the received dgid is
+ * used as the sgid and the received sgid as the dgid, because the sgid
+ * holds the GID of the peer we have to respond to.
+ *
+ * This is why, in the call to rdma_addr_find_l2_eth_by_grh(), the dgid
+ * and sgid arguments are passed in swapped positions relative to the
+ * parameter order.
+ */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
@@ -507,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
}
resolved_dev = dev_get_by_index(&init_net, if_index);
- if (resolved_dev->flags & IFF_LOOPBACK) {
- dev_put(resolved_dev);
- resolved_dev = idev;
- dev_hold(resolved_dev);
- }
rcu_read_lock();
if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
resolved_dev))
@@ -887,6 +895,7 @@ static const struct {
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
[IB_QPS_INIT] = {
.valid = 1,
.req_param = {
@@ -1268,20 +1277,36 @@ out:
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
-int ib_modify_qp(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask)
+/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to the user's input/output buffer information.
+ *
+ * Returns 0 on success, or an appropriate error code on error.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
{
+ int ret;
- if (qp_attr_mask & IB_QP_AV) {
- int ret;
-
- ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+ if (attr_mask & IB_QP_AV) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
+ return ib_security_modify_qp(qp, attr, attr_mask, udata);
+}
+EXPORT_SYMBOL(ib_modify_qp_with_udata);
- return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+int ib_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask)
+{
+ return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
/* clear from the handled mask of the general interrupt */
m = isrc / 64;
n = isrc % 64;
- dd->gi_mask[m] &= ~((u64)1 << n);
+ if (likely(m < CCE_NUM_INT_CSRS)) {
+ dd->gi_mask[m] &= ~((u64)1 << n);
+ } else {
+ dd_dev_err(dd, "remap interrupt err\n");
+ return;
+ }
/* direct the chip source to the given MSI-X interrupt */
m = isrc / 8;
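
A reduced sketch of the bounds check this hunk adds: validate the computed index before writing the mask array. NUM_CSRS and the helper are placeholders:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define NUM_CSRS 16  /* placeholder for CCE_NUM_INT_CSRS */

    static int clear_mask_bit(u64 *mask, unsigned int isrc)
    {
        unsigned int m = isrc / 64;
        unsigned int n = isrc % 64;

        if (m >= NUM_CSRS)  /* reject out-of-range interrupt sources */
            return -EINVAL;

        mask[m] &= ~((u64)1 << n);
        return 0;
    }
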
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->pid);
}
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv;
- priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->owner = qp;
- priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+ priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
rdi->dparms.node);
if (!priv->s_ahg) {
kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
/*
* Functions provided by hfi1 driver for rdmavt to use
*/
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
unsigned free_all_qps(struct rvt_dev_info *rdi);
void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..23fad6d96944 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_READ:
ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
- set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
- atomic_wr(wr)->rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
break;
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
- set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
- atomic_wr(wr)->rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
break;
case IB_WR_SEND:
case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
union ib_gid dgid;
u64 subnet_prefix;
int attr_mask = 0;
- int i;
+ int i, j;
int ret;
+ u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port;
+ u8 port = 0;
u8 sl;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
attr.rnr_retry = 7;
attr.timeout = 0x12;
attr.path_mtu = IB_MTU_256;
+ attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
rdma_ah_set_static_rate(&attr.ah_attr, 3);
subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+ (i % HNS_ROCE_MAX_PORTS);
+ sl = i / HNS_ROCE_MAX_PORTS;
+
+ for (j = 0; j < caps->num_ports; j++) {
+ if (hr_dev->iboe.phy_port[j] == phy_port) {
+ queue_en[i] = 1;
+ port = j;
+ break;
+ }
+ }
+
+ if (!queue_en[i])
+ continue;
+
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
if (IS_ERR(free_mr->mr_free_qp[i])) {
dev_err(dev, "Create loop qp failed!\n");
@@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
}
hr_qp = free_mr->mr_free_qp[i];
- sl = i / caps->num_ports;
-
- if (caps->num_ports == HNS_ROCE_MAX_PORTS)
- phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
- (i % caps->num_ports);
- else
- phy_port = i % caps->num_ports;
-
- hr_qp->port = phy_port + 1;
+ hr_qp->port = port;
hr_qp->phy_port = phy_port;
hr_qp->ibqp.qp_type = IB_QPT_RC;
hr_qp->ibqp.device = &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
hr_qp->ibqp.recv_cq = cq;
hr_qp->ibqp.send_cq = cq;
- rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
- rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
- attr.port_num = phy_port + 1;
+ rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+ rdma_ah_set_sl(&attr.ah_attr, sl);
+ attr.port_num = port + 1;
attr.dest_qp_num = hr_qp->qpn;
memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
- hr_dev->dev_addr[phy_port],
+ hr_dev->dev_addr[port],
MAC_ADDR_OCTET_NUM);
memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
- memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
- memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+ memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+ memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
dgid.raw[11] = 0xff;
dgid.raw[12] = 0xfe;
dgid.raw[8] ^= 2;
rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
- attr_mask |= IB_QP_PORT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
+ if (!hr_qp)
+ continue;
+
ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
if (ret)
dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
int i;
int ret;
- int ne;
+ int ne = 0;
mr_work = container_of(work, struct hns_roce_mr_free_work, work);
hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
+ if (!hr_qp)
+ continue;
+ ne++;
+
ret = hns_roce_v1_send_lp_wqe(hr_qp);
if (ret) {
dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
}
}
- ne = HNS_ROCE_V1_RESV_QP;
do {
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
goto free_work;
}
ne -= ret;
- msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+ (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
} while (ne && time_before_eq(jiffies, end));
if (ne != 0)
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
}
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
- } else {
+ } else {
/* RQ conrespond to CQE */
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
old_cnt = roce_get_field(old_send,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
success_flags = 1;
- else {
- send_ptr = roce_get_field(old_send,
+ } else {
+			send_ptr = roce_get_field(old_send,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
struct hns_roce_dev *hr_dev;
struct hns_roce_qp *hr_qp;
struct device *dev;
+ unsigned long qpn;
int ret;
qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
dev = &hr_dev->pdev->dev;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
hr_qp = qp_work_entry->qp;
+ qpn = hr_qp->qpn;
- dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
qp_work_entry->sche_cnt++;
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
&qp_work_entry->db_wait_stage);
if (ret) {
dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
- hr_qp->qpn);
+ qpn);
return;
}
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
IB_QPS_RESET);
if (ret) {
- dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
return;
}
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
/* RC QP, release QPN */
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ hns_roce_release_range_qp(hr_dev, qpn, 1);
kfree(hr_qp);
} else
kfree(hr_to_hr_sqp(hr_qp));
kfree(qp_work_entry);
- dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
return -ENODEV;
}
- spin_lock_bh(&hr_dev->iboe.lock);
-
switch (event) {
case NETDEV_UP:
case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
break;
}
- spin_unlock_bh(&hr_dev->iboe.lock);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+ PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
if (err)
goto err_mtt;
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* call to mlx4_ib_vma_close.
*/
put_task_struct(owning_process);
- msleep(1);
+ usleep_range(1000, 2000);
owning_process = get_pid_task(ibcontext->tgid,
PIDTYPE_PID);
if (!owning_process ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
if (!count)
break;
- msleep(1);
+ usleep_range(1000, 2000);
} while (time_after(end, jiffies));
flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
- MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
- gfp_t gfp)
+ struct ib_udata *udata, int sqpn,
+ struct mlx4_ib_qp **caller_qp)
{
int qpn;
int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
(qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+ sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else {
- qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+ qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (qp_has_rq(init_attr)) {
- err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
- &qp->buf, gfp)) {
+ &qp->buf)) {
memcpy(&init_attr->cap, &backup_cap,
sizeof(backup_cap));
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_db;
if (mlx4_buf_alloc(dev->dev, qp->buf_size,
- PAGE_SIZE * 2, &qp->buf, gfp)) {
+ PAGE_SIZE * 2, &qp->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
if (err)
goto err_mtt;
qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!qp->sq.wrid)
qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ GFP_KERNEL, PAGE_KERNEL);
qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!qp->rq.wrid)
qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ GFP_KERNEL, PAGE_KERNEL);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
goto err_qpn;
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
int err;
int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
u16 xrcdn = 0;
- gfp_t gfp;
- gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
- GFP_NOIO : GFP_KERNEL;
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
* and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
- MLX4_IB_QP_CREATE_ROCE_V2_GSI |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI))
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO |
MLX4_IB_QP_CREATE_ROCE_V2_GSI |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof *qp, gfp);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_UD:
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
- udata, 0, &qp, gfp);
+ udata, 0, &qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
}
err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
- sqpn,
- &qp, gfp);
+ sqpn, &qp);
if (err)
return ERR_PTR(err);
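
The hunk drops the gfp argument but keeps the quiet-kmalloc-then-vmalloc fallback for the wrid arrays. A minimal sketch of that idiom; the helper is illustrative, and kvfree() releases either allocation:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/vmalloc.h>

    static u64 *alloc_wrid(size_t cnt)
    {
        u64 *wrid;

        /* try physically contiguous memory first, without warning */
        wrid = kmalloc_array(cnt, sizeof(u64),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!wrid)  /* large arrays fall back to vmalloc space */
            wrid = __vmalloc(cnt * sizeof(u64), GFP_KERNEL, PAGE_KERNEL);

        return wrid;  /* free with kvfree() */
    }
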
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
- err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
goto err_srq;
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
- GFP_KERNEL)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+ &srq->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
if (err)
goto err_mtt;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..8ab2f1360a45 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
}
+static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_debugfs_root)
+ return;
+
+ debugfs_remove_recursive(dev->cache.root);
+ dev->cache.root = NULL;
+}
+
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
sprintf(ent->name, "%d", ent->order);
ent->dir = debugfs_create_dir(ent->name, cache->root);
if (!ent->dir)
- return -ENOMEM;
+ goto err;
ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
&size_fops);
if (!ent->fsize)
- return -ENOMEM;
+ goto err;
ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
&limit_fops);
if (!ent->flimit)
- return -ENOMEM;
+ goto err;
ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
&ent->cur);
if (!ent->fcur)
- return -ENOMEM;
+ goto err;
ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
&ent->miss);
if (!ent->fmiss)
- return -ENOMEM;
+ goto err;
}
return 0;
-}
-
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
-{
- if (!mlx5_debugfs_root)
- return;
+err:
+ mlx5_mr_cache_debugfs_cleanup(dev);
- debugfs_remove_recursive(dev->cache.root);
+ return -ENOMEM;
}
static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
if (err)
mlx5_ib_warn(dev, "cache debugfs failure\n");
+ /*
+	 * We don't want to fail driver load if debugfs fails to initialize,
+	 * so the error is not forwarded to the user.
+ */
+
return 0;
}
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
- if (unlikely(i > mr->max_descs))
+ if (unlikely(i >= mr->max_descs))
break;
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
|| (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
int_cnt++;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (int_cnt > 1) {
spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
break;
}
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
}
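
On the recurring msleep(1) to usleep_range(1000, 2000) conversions in this series: per Documentation/timers/timers-howto.txt, msleep() of a few milliseconds is jiffy-based and can oversleep substantially, while usleep_range() uses hrtimers and keeps the wait within the given bounds. A minimal sketch:

    #include <linux/delay.h>

    static void short_settle_delay(void)
    {
        usleep_range(1000, 2000);  /* sleep at least 1 ms, at most 2 ms */
    }
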
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
};
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
- gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
- unsigned long page = get_zeroed_page(gfp);
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
* Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp)
+ enum ib_qp_type type, u8 port)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map, gfp);
+ get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
return ib_mtu_enum_to_int(pmtu);
}
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct qib_qp_priv *priv;
- priv = kzalloc(sizeof(*priv), gfp);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->owner = qp;
- priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+ priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
if (!priv->s_hdr) {
kfree(priv);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
* Functions provided by qib driver for rdmavt to use
*/
unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_notify_qp_reset(struct rvt_qp *qp);
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp);
+ enum ib_qp_type type, u8 port);
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..459865439a0b 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
EXPORT_SYMBOL(ib_rvt_state_ops);
static void get_map_page(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map,
- gfp_t gfp)
+ struct rvt_qpn_map *map)
{
- unsigned long page = get_zeroed_page(gfp);
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
* Free the page if someone raced with us installing it.
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
if (!map->page) {
- get_map_page(qpt, map, GFP_KERNEL);
+ get_map_page(qpt, map);
if (!map->page) {
ret = -ENOMEM;
break;
@@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num, gfp_t gfp)
+ enum ib_qp_type type, u8 port_num)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
if (rdi->driver_f.alloc_qpn)
- return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+ return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map, gfp);
+ get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
- gfp_t gfp;
size_t sqsize;
if (!rdi)
@@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+ init_attr->create_flags)
return ERR_PTR(-EINVAL);
- /* GFP_NOIO is applicable to RC QP's only */
-
- if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
- init_attr->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
-
- gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
- GFP_NOIO : GFP_KERNEL;
-
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sz = sizeof(struct rvt_sge) *
init_attr->cap.max_send_sge +
sizeof(struct rvt_swqe);
- if (gfp == GFP_NOIO)
- swq = __vmalloc(
- sqsize * sz,
- gfp | __GFP_ZERO, PAGE_KERNEL);
- else
- swq = vzalloc_node(
- sqsize * sz,
- rdi->dparms.node);
+ swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
if (!swq)
return ERR_PTR(-ENOMEM);
@@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1);
- qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+ qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
+ rdi->dparms.node);
if (!qp)
goto bail_swq;
@@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
kzalloc_node(
sizeof(*qp->s_ack_queue) *
rvt_max_atomic(rdi),
- gfp,
+ GFP_KERNEL,
rdi->dparms.node);
if (!qp->s_ack_queue)
goto bail_qp;
@@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
* Driver needs to set up it's private QP structure and do any
* initialization that is needed.
*/
- priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+ priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
if (IS_ERR(priv)) {
ret = priv;
goto bail_qp;
@@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.wq = vmalloc_user(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz);
- else if (gfp == GFP_NOIO)
- qp->r_rq.wq = __vmalloc(
- sizeof(struct rvt_rwq) +
- qp->r_rq.size * sz,
- gfp | __GFP_ZERO, PAGE_KERNEL);
else
qp->r_rq.wq = vzalloc_node(
sizeof(struct rvt_rwq) +
@@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num, gfp);
+ init_attr->port_num);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..08f3f90d2912 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
if (unlikely(qp->need_req_skb &&
skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_run_task(&qp->req.task, 1);
+
+ rxe_drop_ref(qp);
}
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
+ rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 23039768f541..be944d5aa9af 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -995,7 +995,9 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
- memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+ memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
+ memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
+ sizeof(skb->cb) - sizeof(ack_pkt));
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
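
A reduced sketch of the skb->cb fix: copy only sizeof the control-block struct and zero the tail, instead of memcpy'ing sizeof(skb->cb) bytes from a smaller source object, which reads past its end. struct pkt_info is a placeholder:

    #include <linux/bug.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    struct pkt_info { int x; };  /* placeholder control block */

    static void stash_pkt(struct sk_buff *skb, const struct pkt_info *pkt)
    {
        BUILD_BUG_ON(sizeof(*pkt) > sizeof(skb->cb));

        memcpy(skb->cb, pkt, sizeof(*pkt));
        /* zero the rest so no stale cb bytes leak through */
        memset(skb->cb + sizeof(*pkt), 0,
               sizeof(skb->cb) - sizeof(*pkt));
    }
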
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 073e66783f1d..07511718d98d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1240,6 +1240,8 @@ int rxe_register_device(struct rxe_dev *rxe)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dma_coerce_mask_and_coherent(&dev->dev,
+ dma_get_required_mask(dev->dev.parent));
dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..f87d104837dc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "ipoib.h"
@@ -954,7 +955,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
break;
}
spin_unlock_irq(&priv->lock);
- msleep(1);
+ usleep_range(1000, 2000);
ipoib_drain_cq(dev);
spin_lock_irq(&priv->lock);
}
@@ -1047,9 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx,
- .create_flags = IB_QP_CREATE_USE_GFP_NOIO
+ .create_flags = 0
};
-
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
@@ -1057,10 +1057,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
- if (PTR_ERR(tx_qp) == -EINVAL) {
- attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
- tx_qp = ib_create_qp(priv->pd, &attr);
- }
tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
@@ -1131,10 +1127,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned int noio_flag;
int ret;
- p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
- GFP_NOIO, PAGE_KERNEL);
+ noio_flag = memalloc_noio_save();
+ p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
ret = -ENOMEM;
goto err_tx;
@@ -1142,9 +1139,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+ memalloc_noio_restore(noio_flag);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
- ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
+ ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
goto err_qp;
}
@@ -1206,7 +1204,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
goto timeout;
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
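
A minimal sketch of the scoped-NOIO idiom this hunk switches to: rather than threading gfp_t through every callee, the whole region is marked so that nested allocations (including those inside ib_create_qp()) implicitly avoid I/O:

    #include <linux/sched/mm.h>
    #include <linux/vmalloc.h>

    static void *alloc_in_noio_scope(size_t size)
    {
        unsigned int noio_flag;
        void *p;

        noio_flag = memalloc_noio_save();  /* begin NOIO region */
        p = vzalloc(size);                 /* allocations here avoid I/O */
        memalloc_noio_restore(noio_flag);  /* end region */

        return p;
    }
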
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index efe7402f4885..57a9655e844d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -770,7 +770,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_drain_cq(dev);
- msleep(1);
+ usleep_range(1000, 2000);
}
ipoib_dbg(priv, "All sends and receives done.\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6e86eeee370e..70dacaf9044e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = 0;
/* dev->mtu > 2K ==> connected mode */
if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
ipoib_dbg(priv, "MTU must be smaller than the underlying "
"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
- dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+ new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
- return 0;
+ if (priv->rn_ops->ndo_change_mtu) {
+ bool carrier_status = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+
+		/* notify the lower level of the real MTU */
+ ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+ if (carrier_status)
+ netif_carrier_on(dev);
+ } else {
+ dev->mtu = new_mtu;
+ }
+
+ return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->rn_ops->ndo_get_stats64)
+ priv->rn_ops->ndo_get_stats64(dev, stats);
+ else
+ netdev_stats_to_stats64(stats, &dev->stats);
}
/* Called with an RCU read lock taken */
@@ -1808,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
+ .ndo_get_stats64 = ipoib_get_stats,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
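
A reduced sketch of the stats delegation added above; ops stands in for priv->rn_ops:

    #include <linux/netdevice.h>

    static void my_get_stats(struct net_device *dev,
                             const struct net_device_ops *ops,
                             struct rtnl_link_stats64 *stats)
    {
        if (ops->ndo_get_stats64)  /* prefer the lower device's stats */
            ops->ndo_get_stats64(dev, stats);
        else                       /* otherwise convert generic counters */
            netdev_stats_to_stats64(stats, &dev->stats);
    }
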
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..37b33d708c2d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;
int iser_debug_level = 0;
@@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
+ mutex_lock(&unbind_iser_conn_mutex);
iser_conn_terminate(iser_conn);
iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
conn->dd_data = NULL;
+ mutex_unlock(&unbind_iser_conn_mutex);
complete(&iser_conn->stop_completion);
mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
struct iser_conn *iser_conn;
struct ib_device *ib_dev;
+ mutex_lock(&unbind_iser_conn_mutex);
+
session = starget_to_session(scsi_target(sdev))->dd_data;
iser_conn = session->leadconn->dd_data;
+ if (!iser_conn) {
+ mutex_unlock(&unbind_iser_conn_mutex);
+ return -ENOTCONN;
+ }
ib_dev = iser_conn->ib_conn.device->ib_device;
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+ mutex_unlock(&unbind_iser_conn_mutex);
+
return 0;
}
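Note: the new unbind_iser_conn_mutex closes a race where iscsi_iser_conn_stop() clears conn->dd_data while iscsi_iser_slave_alloc() is still dereferencing it. A reduced sketch of the reader side under the same lock (function name hypothetical, field names as in the patch):

    static int slave_alloc_safe(struct iscsi_session *session)
    {
            struct iser_conn *iser_conn;

            mutex_lock(&unbind_iser_conn_mutex);
            iser_conn = session->leadconn->dd_data;
            if (!iser_conn) {               /* already unbound */
                    mutex_unlock(&unbind_iser_conn_mutex);
                    return -ENOTCONN;
            }
            /* iser_conn->ib_conn is safe to use until we unlock */
            mutex_unlock(&unbind_iser_conn_mutex);
            return 0;
    }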
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..26a004e97ae0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
unsigned short sg_tablesize, sup_sg_tablesize;
sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
- sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
- device->ib_device->attrs.max_fast_reg_page_list_len);
+ if (device->ib_device->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)
+ sup_sg_tablesize =
+ min_t(
+ uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ device->ib_device->attrs.max_fast_reg_page_list_len);
+ else
+ sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
}
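Note: IB_DEVICE_MEM_MGT_EXTENSIONS advertises fast memory registration; only then is max_fast_reg_page_list_len a meaningful bound on the scatter-gather size, which is what the hunk above encodes. A sketch of the capping rule with assumed names:

    static unsigned short cap_sg_tablesize(struct ib_device *ibdev,
                                           unsigned short driver_max)
    {
            if (ibdev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
                    return min_t(unsigned short, driver_max,
                                 ibdev->attrs.max_fast_reg_page_list_len);
            return driver_max;      /* attribute not consulted otherwise */
    }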
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index fcbed35e95a8..0e662656ef42 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1452,7 +1452,7 @@ static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
- struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "login recv");
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 1ced0731c140..402275be0931 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1157,8 +1157,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- pr_debug("Aborting cmd with state %d and tag %lld\n", state,
- ioctx->cmd.tag);
+ pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
+ ioctx->state, ioctx->cmd.tag);
switch (state) {
case SRPT_STATE_NEW:
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index da3d362f21b1..a047b9af8369 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -48,6 +48,7 @@ struct gpio_button_data {
spinlock_t lock;
bool disabled;
bool key_pressed;
+ bool suspended;
};
struct gpio_keys_drvdata {
@@ -396,8 +397,20 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
BUG_ON(irq != bdata->irq);
- if (bdata->button->wakeup)
+ if (bdata->button->wakeup) {
+ const struct gpio_keys_button *button = bdata->button;
+
pm_stay_awake(bdata->input->dev.parent);
+ if (bdata->suspended &&
+ (button->type == 0 || button->type == EV_KEY)) {
+ /*
+ * Simulate a wakeup key press in case the key has
+ * already been released by the time the interrupt
+ * handler got to run.
+ */
+ input_report_key(bdata->input, button->code, 1);
+ }
+ }
mod_delayed_work(system_wq,
&bdata->work,
@@ -855,6 +868,7 @@ static int __maybe_unused gpio_keys_suspend(struct device *dev)
struct gpio_button_data *bdata = &ddata->data[i];
if (bdata->button->wakeup)
enable_irq_wake(bdata->irq);
+ bdata->suspended = true;
}
} else {
mutex_lock(&input->mutex);
@@ -878,6 +892,7 @@ static int __maybe_unused gpio_keys_resume(struct device *dev)
struct gpio_button_data *bdata = &ddata->data[i];
if (bdata->button->wakeup)
disable_irq_wake(bdata->irq);
+ bdata->suspended = false;
}
} else {
mutex_lock(&input->mutex);
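Note: the wakeup path above covers buttons that are already released by the time the handler runs after resume: a press is synthesized so userspace still observes a complete press/release pair. An illustrative helper (hypothetical name) capturing just that step:

    static void report_missed_wakeup_press(struct input_dev *input,
                                           unsigned int code, bool suspended)
    {
            if (!suspended)
                    return;
            input_report_key(input, code, 1);       /* synthesize the press */
            input_sync(input);
            /* the regular debounce work then reports the real state */
    }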
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index eb770613a9bd..fa130e7b734c 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/slab.h>
#include <asm/xen/hypervisor.h>
@@ -34,11 +35,14 @@
struct xenkbd_info {
struct input_dev *kbd;
struct input_dev *ptr;
+ struct input_dev *mtouch;
struct xenkbd_page *page;
int gref;
int irq;
struct xenbus_device *xbdev;
char phys[32];
+ /* current MT slot/contact ID we are injecting events in */
+ int mtouch_cur_contact_id;
};
enum { KPARAM_X, KPARAM_Y, KPARAM_CNT };
@@ -56,6 +60,112 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *);
* to do that.
*/
+static void xenkbd_handle_motion_event(struct xenkbd_info *info,
+ struct xenkbd_motion *motion)
+{
+ input_report_rel(info->ptr, REL_X, motion->rel_x);
+ input_report_rel(info->ptr, REL_Y, motion->rel_y);
+ if (motion->rel_z)
+ input_report_rel(info->ptr, REL_WHEEL, -motion->rel_z);
+ input_sync(info->ptr);
+}
+
+static void xenkbd_handle_position_event(struct xenkbd_info *info,
+ struct xenkbd_position *pos)
+{
+ input_report_abs(info->ptr, ABS_X, pos->abs_x);
+ input_report_abs(info->ptr, ABS_Y, pos->abs_y);
+ if (pos->rel_z)
+ input_report_rel(info->ptr, REL_WHEEL, -pos->rel_z);
+ input_sync(info->ptr);
+}
+
+static void xenkbd_handle_key_event(struct xenkbd_info *info,
+ struct xenkbd_key *key)
+{
+ struct input_dev *dev;
+
+ if (test_bit(key->keycode, info->ptr->keybit)) {
+ dev = info->ptr;
+ } else if (test_bit(key->keycode, info->kbd->keybit)) {
+ dev = info->kbd;
+ } else {
+ pr_warn("unhandled keycode 0x%x\n", key->keycode);
+ return;
+ }
+
+ input_report_key(dev, key->keycode, key->pressed);
+ input_sync(dev);
+}
+
+static void xenkbd_handle_mt_event(struct xenkbd_info *info,
+ struct xenkbd_mtouch *mtouch)
+{
+ if (unlikely(!info->mtouch))
+ return;
+
+ if (mtouch->contact_id != info->mtouch_cur_contact_id) {
+ info->mtouch_cur_contact_id = mtouch->contact_id;
+ input_mt_slot(info->mtouch, mtouch->contact_id);
+ }
+
+ switch (mtouch->event_type) {
+ case XENKBD_MT_EV_DOWN:
+ input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true);
+ /* fall through */
+
+ case XENKBD_MT_EV_MOTION:
+ input_report_abs(info->mtouch, ABS_MT_POSITION_X,
+ mtouch->u.pos.abs_x);
+ input_report_abs(info->mtouch, ABS_MT_POSITION_Y,
+ mtouch->u.pos.abs_y);
+ break;
+
+ case XENKBD_MT_EV_SHAPE:
+ input_report_abs(info->mtouch, ABS_MT_TOUCH_MAJOR,
+ mtouch->u.shape.major);
+ input_report_abs(info->mtouch, ABS_MT_TOUCH_MINOR,
+ mtouch->u.shape.minor);
+ break;
+
+ case XENKBD_MT_EV_ORIENT:
+ input_report_abs(info->mtouch, ABS_MT_ORIENTATION,
+ mtouch->u.orientation);
+ break;
+
+ case XENKBD_MT_EV_UP:
+ input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, false);
+ break;
+
+ case XENKBD_MT_EV_SYN:
+ input_mt_sync_frame(info->mtouch);
+ input_sync(info->mtouch);
+ break;
+ }
+}
+
+static void xenkbd_handle_event(struct xenkbd_info *info,
+ union xenkbd_in_event *event)
+{
+ switch (event->type) {
+ case XENKBD_TYPE_MOTION:
+ xenkbd_handle_motion_event(info, &event->motion);
+ break;
+
+ case XENKBD_TYPE_KEY:
+ xenkbd_handle_key_event(info, &event->key);
+ break;
+
+ case XENKBD_TYPE_POS:
+ xenkbd_handle_position_event(info, &event->pos);
+ break;
+
+ case XENKBD_TYPE_MTOUCH:
+ xenkbd_handle_mt_event(info, &event->mtouch);
+ break;
+ }
+}
+
static irqreturn_t input_handler(int rq, void *dev_id)
{
struct xenkbd_info *info = dev_id;
@@ -66,44 +176,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
if (prod == page->in_cons)
return IRQ_HANDLED;
rmb(); /* ensure we see ring contents up to prod */
- for (cons = page->in_cons; cons != prod; cons++) {
- union xenkbd_in_event *event;
- struct input_dev *dev;
- event = &XENKBD_IN_RING_REF(page, cons);
-
- dev = info->ptr;
- switch (event->type) {
- case XENKBD_TYPE_MOTION:
- input_report_rel(dev, REL_X, event->motion.rel_x);
- input_report_rel(dev, REL_Y, event->motion.rel_y);
- if (event->motion.rel_z)
- input_report_rel(dev, REL_WHEEL,
- -event->motion.rel_z);
- break;
- case XENKBD_TYPE_KEY:
- dev = NULL;
- if (test_bit(event->key.keycode, info->kbd->keybit))
- dev = info->kbd;
- if (test_bit(event->key.keycode, info->ptr->keybit))
- dev = info->ptr;
- if (dev)
- input_report_key(dev, event->key.keycode,
- event->key.pressed);
- else
- pr_warn("unhandled keycode 0x%x\n",
- event->key.keycode);
- break;
- case XENKBD_TYPE_POS:
- input_report_abs(dev, ABS_X, event->pos.abs_x);
- input_report_abs(dev, ABS_Y, event->pos.abs_y);
- if (event->pos.rel_z)
- input_report_rel(dev, REL_WHEEL,
- -event->pos.rel_z);
- break;
- }
- if (dev)
- input_sync(dev);
- }
+ for (cons = page->in_cons; cons != prod; cons++)
+ xenkbd_handle_event(info, &XENKBD_IN_RING_REF(page, cons));
mb(); /* ensure we got ring contents */
page->in_cons = cons;
notify_remote_via_irq(info->irq);
@@ -115,9 +189,9 @@ static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, i;
- unsigned int abs;
+ unsigned int abs, touch;
struct xenkbd_info *info;
- struct input_dev *kbd, *ptr;
+ struct input_dev *kbd, *ptr, *mtouch;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
@@ -152,6 +226,17 @@ static int xenkbd_probe(struct xenbus_device *dev,
}
}
+ touch = xenbus_read_unsigned(dev->nodename,
+ XENKBD_FIELD_FEAT_MTOUCH, 0);
+ if (touch) {
+ ret = xenbus_write(XBT_NIL, dev->nodename,
+ XENKBD_FIELD_REQ_MTOUCH, "1");
+ if (ret) {
+ pr_warn("xenkbd: can't request multi-touch");
+ touch = 0;
+ }
+ }
+
/* keyboard */
kbd = input_allocate_device();
if (!kbd)
@@ -208,6 +293,58 @@ static int xenkbd_probe(struct xenbus_device *dev,
}
info->ptr = ptr;
+ /* multi-touch device */
+ if (touch) {
+ int num_cont, width, height;
+
+ mtouch = input_allocate_device();
+ if (!mtouch)
+ goto error_nomem;
+
+ num_cont = xenbus_read_unsigned(info->xbdev->nodename,
+ XENKBD_FIELD_MT_NUM_CONTACTS,
+ 1);
+ width = xenbus_read_unsigned(info->xbdev->nodename,
+ XENKBD_FIELD_MT_WIDTH,
+ XENFB_WIDTH);
+ height = xenbus_read_unsigned(info->xbdev->nodename,
+ XENKBD_FIELD_MT_HEIGHT,
+ XENFB_HEIGHT);
+
+ mtouch->name = "Xen Virtual Multi-touch";
+ mtouch->phys = info->phys;
+ mtouch->id.bustype = BUS_PCI;
+ mtouch->id.vendor = 0x5853;
+ mtouch->id.product = 0xfffd;
+
+ input_set_abs_params(mtouch, ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+ input_set_abs_params(mtouch, ABS_MT_POSITION_X,
+ 0, width, 0, 0);
+ input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
+ 0, height, 0, 0);
+ input_set_abs_params(mtouch, ABS_MT_PRESSURE,
+ 0, 255, 0, 0);
+
+ ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
+ if (ret) {
+ input_free_device(mtouch);
+ xenbus_dev_fatal(info->xbdev, ret,
+ "input_mt_init_slots");
+ goto error;
+ }
+
+ ret = input_register_device(mtouch);
+ if (ret) {
+ input_free_device(mtouch);
+ xenbus_dev_fatal(info->xbdev, ret,
+ "input_register_device(mtouch)");
+ goto error;
+ }
+ info->mtouch_cur_contact_id = -1;
+ info->mtouch = mtouch;
+ }
+
ret = xenkbd_connect_backend(dev, info);
if (ret < 0)
goto error;
@@ -240,6 +377,8 @@ static int xenkbd_remove(struct xenbus_device *dev)
input_unregister_device(info->kbd);
if (info->ptr)
input_unregister_device(info->ptr);
+ if (info->mtouch)
+ input_unregister_device(info->mtouch);
free_page((unsigned long)info->page);
kfree(info);
return 0;
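Note: the multi-touch handler added above follows the standard kernel MT slot protocol from include/linux/input/mt.h: select a slot, mark the tool active, report coordinates, then close the frame. A condensed sketch of one full contact report (helper name illustrative):

    #include <linux/input/mt.h>

    static void report_one_contact(struct input_dev *mt, int slot, int x, int y)
    {
            input_mt_slot(mt, slot);
            input_mt_report_slot_state(mt, MT_TOOL_FINGER, true);
            input_report_abs(mt, ABS_MT_POSITION_X, x);
            input_report_abs(mt, ABS_MT_POSITION_Y, y);
            input_mt_sync_frame(mt);
            input_sync(mt);
    }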
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index c52da651269b..824f4c1c1f31 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -436,8 +436,10 @@ static int i8042_start(struct serio *serio)
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = true;
- mb();
+ spin_unlock_irq(&i8042_lock);
+
return 0;
}
@@ -450,16 +452,20 @@ static void i8042_stop(struct serio *serio)
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = false;
+ port->serio = NULL;
+ spin_unlock_irq(&i8042_lock);
/*
+ * We need to make sure that the interrupt handler finishes using
+ * our serio port before we return from this function.
* We synchronize with both AUX and KBD IRQs because there is
* a (very unlikely) chance that AUX IRQ is raised for KBD port
* and vice versa.
*/
synchronize_irq(I8042_AUX_IRQ);
synchronize_irq(I8042_KBD_IRQ);
- port->serio = NULL;
}
/*
@@ -576,7 +582,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&i8042_lock, flags);
- if (likely(port->exists && !filtered))
+ if (likely(serio && !filtered))
serio_interrupt(serio, data, dfl);
out:
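Note: the bare mb() in i8042_start() had no documented pairing on the reader side; taking i8042_lock instead guarantees the IRQ handler sees port->exists and port->serio change together. A sketch of the retire sequence, with the lock and IRQ number as parameters for illustration:

    static void retire_port(struct i8042_port *port, spinlock_t *lock,
                            unsigned int irq)
    {
            spin_lock_irq(lock);
            port->exists = false;
            port->serio = NULL;
            spin_unlock_irq(lock);
            synchronize_irq(irq);   /* wait out a handler already running */
    }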
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 060d357f107f..6f423bc49d0d 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic)
cs->deflect_dest[0] = '\0';
retval = 4; /* only proceed */
}
- sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
- cs->akt_state,
- cs->divert_id,
- divert_if.drv_to_name(cs->ics.driver),
- (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
- cs->ics.parm.setup.phone,
- cs->ics.parm.setup.eazmsn,
- cs->ics.parm.setup.si1,
- cs->ics.parm.setup.si2,
- cs->ics.parm.setup.screen,
- dv->rule.waittime,
- cs->deflect_dest);
+ snprintf(cs->info, sizeof(cs->info),
+ "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
+ cs->akt_state,
+ cs->divert_id,
+ divert_if.drv_to_name(cs->ics.driver),
+ (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
+ cs->ics.parm.setup.phone,
+ cs->ics.parm.setup.eazmsn,
+ cs->ics.parm.setup.si1,
+ cs->ics.parm.setup.si2,
+ cs->ics.parm.setup.screen,
+ dv->rule.waittime,
+ cs->deflect_dest);
if ((dv->rule.action == DEFLECT_REPORT) ||
(dv->rule.action == DEFLECT_REJECT)) {
put_info_buffer(cs->info);
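Note: this is the classic sprintf() to snprintf() hardening: the write is bounded by the destination size and always NUL-terminated. A reduced sketch with a hypothetical helper and shortened format string:

    static void fill_info(char *info, size_t len, int state,
                          unsigned long id, const char *drv)
    {
            /* snprintf() returns the would-be length; >= len means truncated */
            if ((size_t)snprintf(info, len, "%d 0x%lx %s\n",
                                 state, id, drv) >= len)
                    pr_warn("divert info truncated\n");
    }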
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 40c7e2cf423b..034cabac699d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
static bool suppress_pollack;
-static struct pci_device_id c4_pci_tbl[] = {
+static const struct pci_device_id c4_pci_tbl[] = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
{ } /* Terminating entry */
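Note: this and the following ISDN hunks all constify PCI device ID tables. The tables are only read after registration, so const lets the compiler place them in .rodata. Minimal sketch with illustrative IDs:

    static const struct pci_device_id example_ids[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4) },
            { }     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_ids);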
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 8b7ad4f1ab01..b2023e08dcd2 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc {
/*
This table should be sorted by PCI device ID
*/
-static struct pci_device_id divas_pci_tbl[] = {
+static const struct pci_device_id divas_pci_tbl[] = {
/* Diva Server BRI-2M PCI 0xE010 */
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
CARDTYPE_MAESTRA_PCI },
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index e3fa1cd64470..dce6632daae1 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
pr_info("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI"},
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index aea0c9616ea5..3cf07b8ced1c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = {
#undef H
#define H(x) ((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] = {
+static const struct pci_device_id hfmultipci_ids[] = {
/* Cards with HFC-4S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 5dc246d71c16..d2e401a8090e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] =
{},
};
-static struct pci_device_id hfc_ids[] =
+static const struct pci_device_id hfc_ids[] =
{
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
(unsigned long) &hfc_map[0] },
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index afde4edef9ae..6a6d848bd18e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev)
/* We cannot select cards with PCI_SUB... IDs, since here are cards with
* SUB IDs set to PCI_ANY_ID, so we need to match all and reject
* known other cards which not work with this driver - see probe function */
-static struct pci_device_id nj_pci_ids[] = {
+static const struct pci_device_id nj_pci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 3052c836b89f..d80072fef434 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev)
pr_notice("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id w6692_ids[] = {
+static const struct pci_device_id w6692_ids[] = {
{ PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c7d68675b028..7108bdb8742e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
#ifdef CONFIG_PCI
#include <linux/pci.h>
-static struct pci_device_id hisax_pci_tbl[] __used = {
+static const struct pci_device_id hisax_pci_tbl[] __used = {
#ifdef CONFIG_HISAX_FRITZPCI
{PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
#endif
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 90f051ce0259..9090cc1e1f29 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -86,7 +86,7 @@ typedef struct {
char *device_name;
} hfc4s8s_param;
-static struct pci_device_id hfc4s8s_ids[] = {
+static const struct pci_device_id hfc4s8s_ids[] = {
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_4S,
.subvendor = 0x1397,
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5a9f39ed1d5d..e4f7573ba9bf 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -52,7 +52,7 @@ module_param(debug, int, 0);
MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
{ .vendor = PCI_VENDOR_ID_AVM,
.device = PCI_DEVICE_ID_AVM_A1,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f4eace5ea184..40f3cd7eab0f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
rdev_for_each(rdev, mddev) {
if (! test_bit(In_sync, &rdev->flags)
- || test_bit(Faulty, &rdev->flags))
+ || test_bit(Faulty, &rdev->flags)
+ || test_bit(Bitmap_sync, &rdev->flags))
continue;
target = offset + index * (PAGE_SIZE/512);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 991f0fe2dcc6..b50eb4ac1b82 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,7 +134,9 @@ enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
Bitmap_sync, /* ..actually, not quite In_sync. Need a
- * bitmap-based recovery to get fully in sync
+ * bitmap-based recovery to get fully in sync.
+ * The bit is only meaningful before device
+ * has been passed to pers->hot_add_disk.
*/
WriteMostly, /* Avoid reading if at all possible */
AutoDetected, /* added by auto-detect */
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 77cce3573aa8..44ad5baf3206 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
- ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
+ ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
if (!ppl_conf->bs) {
ret = -ENOMEM;
goto err;
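Note: bioset_create() in this era takes a flags argument; without BIOSET_NEED_BVECS the set carries no bvec pool, and bio_alloc_bioset() callers that attach pages to their bios can fail. A sketch under that v4.13-era API, pool size illustrative:

    static struct bio_set *create_ppl_bioset(unsigned int pool_size)
    {
            /* BIOSET_NEED_BVECS adds the bvec pool bio_add_page() users need */
            return bioset_create(pool_size, 0, BIOSET_NEED_BVECS);
    }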
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ceb338b094b..aeeb8d6854e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7951,12 +7951,10 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- rdev_for_each(rdev, conf->mddev)
- rdev->data_offset = rdev->new_data_offset;
+ md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 0cfac2d39107..8ac59dc80f23 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -637,6 +637,9 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
sizeof(num_of_cmds)))
return -EFAULT;
+ if (!num_of_cmds)
+ return 0;
+
if (num_of_cmds > MMC_IOC_MAX_CMDS)
return -EINVAL;
@@ -1182,7 +1185,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
- for (i = 0; i < mq_rq->ioc_count; i++) {
+ for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]);
if (ret)
break;
@@ -2167,6 +2170,7 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
+ blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
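Note: two of the fixes above share a theme: treat an empty multi-command ioctl as a successful no-op, and initialise ret so a zero-iteration loop cannot return stack garbage. A sketch, where do_one_cmd() is a hypothetical stand-in for __mmc_blk_ioctl_cmd():

    static int run_cmds(struct mmc_ioc_cmd *cmds, unsigned int n)
    {
            unsigned int i;
            int ret = 0;

            if (!n)
                    return 0;       /* nothing to do is not an error */
            for (i = 0; i < n; i++) {
                    ret = do_one_cmd(&cmds[i]);
                    if (ret)
                            break;
            }
            return ret;
    }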
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index cf66a3db71b8..ac678e9fb19a 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -45,6 +45,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
+#include <linux/pci.h>
#endif
#include "sdhci.h"
@@ -134,6 +135,16 @@ static bool sdhci_acpi_byt(void)
return x86_match_cpu(byt);
}
+static bool sdhci_acpi_cht(void)
+{
+ static const struct x86_cpu_id cht[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ {}
+ };
+
+ return x86_match_cpu(cht);
+}
+
#define BYT_IOSF_SCCEP 0x63
#define BYT_IOSF_OCP_NETCTRL0 0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
@@ -178,6 +189,45 @@ static bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
+static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
+ unsigned int slot, unsigned int parent_slot)
+{
+ struct pci_dev *dev, *parent, *from = NULL;
+
+ while (1) {
+ dev = pci_get_device(vendor, device, from);
+ pci_dev_put(from);
+ if (!dev)
+ break;
+ parent = pci_upstream_bridge(dev);
+ if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot &&
+ parent && PCI_SLOT(parent->devfn) == parent_slot &&
+ !pci_upstream_bridge(parent)) {
+ pci_dev_put(dev);
+ return true;
+ }
+ from = dev;
+ }
+
+ return false;
+}
+
+/*
+ * GPDwin uses PCI wifi which conflicts with SDIO's use of
+ * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is
+ * problematic, but since SDIO is only used for wifi, the presence of the PCI
+ * wifi card in the expected slot with an ACPI companion node is used to
+ * indicate that acpi_device_fix_up_power() should be avoided.
+ */
+static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
+ const char *uid)
+{
+ return sdhci_acpi_cht() &&
+ !strcmp(hid, "80860F14") &&
+ !strcmp(uid, "2") &&
+ sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
+}
+
#else
static inline void sdhci_acpi_byt_setting(struct device *dev)
@@ -189,6 +239,12 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
+static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
+ const char *uid)
+{
+ return false;
+}
+
#endif
static int bxt_get_cd(struct mmc_host *mmc)
@@ -389,18 +445,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_device(handle, &device))
return -ENODEV;
+ hid = acpi_device_hid(device);
+ uid = device->pnp.unique_id;
+
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
- list_for_each_entry(child, &device->children, node)
- if (child->status.present && child->status.enabled)
- acpi_device_fix_up_power(child);
+ if (!sdhci_acpi_no_fixup_child_power(hid, uid)) {
+ list_for_each_entry(child, &device->children, node)
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
+ }
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
- hid = acpi_device_hid(device);
- uid = device->pnp.unique_id;
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem)
return -ENOMEM;
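Note: per its kernel-doc, pci_get_device() takes a reference on the device it returns and drops the reference on its from argument, so a walk like the one above only needs an explicit pci_dev_put() on the device it stops at. Canonical form of the loop (helper name illustrative):

    static bool wifi_in_slot(unsigned int vendor, unsigned int device,
                             unsigned int slot)
    {
            struct pci_dev *pdev = NULL;

            /* pci_get_device() releases the prior reference each iteration */
            while ((pdev = pci_get_device(vendor, device, pdev))) {
                    if (PCI_SLOT(pdev->devfn) == slot) {
                            pci_dev_put(pdev);      /* release the kept match */
                            return true;
                    }
            }
            return false;
    }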
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 82b80d42f7ae..88a94355ac90 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -409,30 +409,29 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
* Transfer the data
*/
if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
- u8 data[4] = { };
+ u32 data = 0;
+ u32 *buf32 = (u32 *)buf;
if (is_read)
- sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
count >> 2);
else
- sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
count >> 2);
/* if count was multiple of 4 */
if (!(count & 0x3))
return;
- buf8 = (u8 *)(buf + (count >> 2));
+ buf32 += count >> 2;
count %= 4;
if (is_read) {
- sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
- (u32 *)data, 1);
- memcpy(buf8, data, count);
+ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ memcpy(buf32, &data, count);
} else {
- memcpy(data, buf8, count);
- sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
- (u32 *)data, 1);
+ memcpy(&data, buf32, count);
+ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
}
return;
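Note: the rework above keeps every access to the 32-bit-only data port word-sized: whole words move through the *_rep helpers and a 1-3 byte tail is bounced through an aligned u32, never a narrower access. Sketch of the read-side tail, names as in the driver:

    static void read_unaligned_tail(struct tmio_mmc_host *host,
                                    u32 *buf32, int tail /* 1..3 bytes */)
    {
            u32 data;

            sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
            memcpy(buf32, &data, tail);     /* copy only the valid bytes */
    }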
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index e15a9733fcfd..9668616faf16 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1386,7 +1386,7 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
* order for ISA to be able to DMA to it.
*/
host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
- GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
+ GFP_NOIO | GFP_DMA | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!host->dma_buffer)
goto free;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index e83a279f1217..5a2d71729b9a 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -155,6 +155,10 @@ config MTD_BCM47XX_PARTS
This provides partitions parser for devices based on BCM47xx
boards.
+menu "Partition parsers"
+source "drivers/mtd/parsers/Kconfig"
+endmenu
+
comment "User Modules And Translation Layers"
#
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 99bb9a1f6e16..151d60df303a 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+obj-y += parsers/
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index d10fa6c8f074..fe2581d9d882 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -43,7 +43,8 @@
#define ML_MAGIC2 0x26594131
#define TRX_MAGIC 0x30524448
#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */
-#define UBI_EC_MAGIC 0x23494255 /* UBI# */
+
+static const char * const trx_types[] = { "trx", NULL };
struct trx_header {
uint32_t magic;
@@ -62,89 +63,6 @@ static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
part->mask_flags = mask_flags;
}
-static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
- size_t offset)
-{
- uint32_t buf;
- size_t bytes_read;
- int err;
-
- err = mtd_read(master, offset, sizeof(buf), &bytes_read,
- (uint8_t *)&buf);
- if (err && !mtd_is_bitflip(err)) {
- pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
- offset, err);
- goto out_default;
- }
-
- if (buf == UBI_EC_MAGIC)
- return "ubi";
-
-out_default:
- return "rootfs";
-}
-
-static int bcm47xxpart_parse_trx(struct mtd_info *master,
- struct mtd_partition *trx,
- struct mtd_partition *parts,
- size_t parts_len)
-{
- struct trx_header header;
- size_t bytes_read;
- int curr_part = 0;
- int i, err;
-
- if (parts_len < 3) {
- pr_warn("No enough space to add TRX partitions!\n");
- return -ENOMEM;
- }
-
- err = mtd_read(master, trx->offset, sizeof(header), &bytes_read,
- (uint8_t *)&header);
- if (err && !mtd_is_bitflip(err)) {
- pr_err("mtd_read error while reading TRX header: %d\n", err);
- return err;
- }
-
- i = 0;
-
- /* We have LZMA loader if offset[2] points to sth */
- if (header.offset[2]) {
- bcm47xxpart_add_part(&parts[curr_part++], "loader",
- trx->offset + header.offset[i], 0);
- i++;
- }
-
- if (header.offset[i]) {
- bcm47xxpart_add_part(&parts[curr_part++], "linux",
- trx->offset + header.offset[i], 0);
- i++;
- }
-
- if (header.offset[i]) {
- size_t offset = trx->offset + header.offset[i];
- const char *name = bcm47xxpart_trx_data_part_name(master,
- offset);
-
- bcm47xxpart_add_part(&parts[curr_part++], name, offset, 0);
- i++;
- }
-
- /*
- * Assume that every partition ends at the beginning of the one it is
- * followed by.
- */
- for (i = 0; i < curr_part; i++) {
- u64 next_part_offset = (i < curr_part - 1) ?
- parts[i + 1].offset :
- trx->offset + trx->size;
-
- parts[i].size = next_part_offset - parts[i].offset;
- }
-
- return curr_part;
-}
-
/**
* bcm47xxpart_bootpartition - gets index of TRX partition used by bootloader
*
@@ -362,17 +280,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
for (i = 0; i < trx_num; i++) {
struct mtd_partition *trx = &parts[trx_parts[i]];
- if (i == bcm47xxpart_bootpartition()) {
- int num_parts;
-
- num_parts = bcm47xxpart_parse_trx(master, trx,
- parts + curr_part,
- BCM47XXPART_MAX_PARTS - curr_part);
- if (num_parts > 0)
- curr_part += num_parts;
- } else {
+ if (i == bcm47xxpart_bootpartition())
+ trx->types = trx_types;
+ else
trx->name = "failsafe";
- }
}
*pparts = parts;
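Note: instead of parsing TRX content inline, the partition now advertises which parsers may run on it via the types field of struct mtd_partition (added alongside this series), and the MTD core invokes the matching "trx" parser later. A sketch of the tagging step:

    static const char * const trx_types[] = { "trx", NULL };

    static void tag_trx_partition(struct mtd_partition *part, bool bootable)
    {
            if (bootable)
                    part->types = trx_types;  /* MTD core runs the trx parser */
            else
                    part->name = "failsafe";
    }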
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 94d3eb42c4d5..7d342965f392 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
size_t totlen = 0, thislen;
int ret = 0;
size_t buflen = 0;
- static char *buffer;
+ char *buffer;
if (!ECCBUF_SIZE) {
/* We should fall back to a general writev implementation.
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 58329d2dacd1..6def5445e03e 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -95,6 +95,16 @@ config MTD_M25P80
if you want to specify device partitioning or to use a device which
doesn't support the JEDEC ID instruction.
+config MTD_MCHP23K256
+ tristate "Microchip 23K256 SRAM"
+ depends on SPI_MASTER
+ help
+ This enables access to Microchip 23K256 SRAM chips, using SPI.
+
+ Set up your SPI devices with the right board-specific
+ platform data, or a device tree description if you want to
+ specify device partitioning.
+
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 7912d3a0ee34..f0f767624cc6 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_MCHP23K256) += mchp23k256.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c4df3b1bded0..00eea6fd379c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -78,11 +78,17 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
- struct spi_transfer t[2] = {};
+ unsigned int inst_nbits, addr_nbits, data_nbits, data_idx;
+ struct spi_transfer t[3] = {};
struct spi_message m;
int cmd_sz = m25p_cmdsz(nor);
ssize_t ret;
+ /* get transfer protocols. */
+ inst_nbits = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ addr_nbits = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ data_nbits = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
spi_message_init(&m);
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
@@ -92,12 +98,27 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
m25p_addr2cmd(nor, to, flash->command);
t[0].tx_buf = flash->command;
+ t[0].tx_nbits = inst_nbits;
t[0].len = cmd_sz;
spi_message_add_tail(&t[0], &m);
- t[1].tx_buf = buf;
- t[1].len = len;
- spi_message_add_tail(&t[1], &m);
+ /* split the op code and address bytes into two transfers if needed. */
+ data_idx = 1;
+ if (addr_nbits != inst_nbits) {
+ t[0].len = 1;
+
+ t[1].tx_buf = &flash->command[1];
+ t[1].tx_nbits = addr_nbits;
+ t[1].len = cmd_sz - 1;
+ spi_message_add_tail(&t[1], &m);
+
+ data_idx = 2;
+ }
+
+ t[data_idx].tx_buf = buf;
+ t[data_idx].tx_nbits = data_nbits;
+ t[data_idx].len = len;
+ spi_message_add_tail(&t[data_idx], &m);
ret = spi_sync(spi, &m);
if (ret)
@@ -109,18 +130,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
return ret;
}
-static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
-{
- switch (nor->flash_read) {
- case SPI_NOR_DUAL:
- return 2;
- case SPI_NOR_QUAD:
- return 4;
- default:
- return 0;
- }
-}
-
/*
* Read an address range from the nor chip. The address range
* may be any size provided it is within the physical boundaries.
@@ -130,13 +139,20 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
- struct spi_transfer t[2];
+ unsigned int inst_nbits, addr_nbits, data_nbits, data_idx;
+ struct spi_transfer t[3];
struct spi_message m;
unsigned int dummy = nor->read_dummy;
ssize_t ret;
+ int cmd_sz;
+
+ /* get transfer protocols. */
+ inst_nbits = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ addr_nbits = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ data_nbits = spi_nor_get_protocol_data_nbits(nor->read_proto);
/* convert the dummy cycles to the number of bytes */
- dummy /= 8;
+ dummy = (dummy * addr_nbits) / 8;
if (spi_flash_read_supported(spi)) {
struct spi_flash_read_message msg;
@@ -149,10 +165,9 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
msg.read_opcode = nor->read_opcode;
msg.addr_width = nor->addr_width;
msg.dummy_bytes = dummy;
- /* TODO: Support other combinations */
- msg.opcode_nbits = SPI_NBITS_SINGLE;
- msg.addr_nbits = SPI_NBITS_SINGLE;
- msg.data_nbits = m25p80_rx_nbits(nor);
+ msg.opcode_nbits = inst_nbits;
+ msg.addr_nbits = addr_nbits;
+ msg.data_nbits = data_nbits;
ret = spi_flash_read(spi, &msg);
if (ret < 0)
@@ -167,20 +182,45 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
m25p_addr2cmd(nor, from, flash->command);
t[0].tx_buf = flash->command;
+ t[0].tx_nbits = inst_nbits;
t[0].len = m25p_cmdsz(nor) + dummy;
spi_message_add_tail(&t[0], &m);
- t[1].rx_buf = buf;
- t[1].rx_nbits = m25p80_rx_nbits(nor);
- t[1].len = min3(len, spi_max_transfer_size(spi),
- spi_max_message_size(spi) - t[0].len);
- spi_message_add_tail(&t[1], &m);
+ /*
+ * Set all dummy/mode cycle bits to avoid sending some manufacturer
+ * specific pattern, which might make the memory enter its Continuous
+ * Read mode by mistake.
+ * Based on the different mode cycle bit patterns listed and described
+ * in the JESD216B specification, the 0xff value works for all memories
+ * and all manufacturers.
+ */
+ cmd_sz = t[0].len;
+ memset(flash->command + cmd_sz - dummy, 0xff, dummy);
+
+ /* split the op code and address bytes into two transfers if needed. */
+ data_idx = 1;
+ if (addr_nbits != inst_nbits) {
+ t[0].len = 1;
+
+ t[1].tx_buf = &flash->command[1];
+ t[1].tx_nbits = addr_nbits;
+ t[1].len = cmd_sz - 1;
+ spi_message_add_tail(&t[1], &m);
+
+ data_idx = 2;
+ }
+
+ t[data_idx].rx_buf = buf;
+ t[data_idx].rx_nbits = data_nbits;
+ t[data_idx].len = min3(len, spi_max_transfer_size(spi),
+ spi_max_message_size(spi) - cmd_sz);
+ spi_message_add_tail(&t[data_idx], &m);
ret = spi_sync(spi, &m);
if (ret)
return ret;
- ret = m.actual_length - m25p_cmdsz(nor) - dummy;
+ ret = m.actual_length - cmd_sz;
if (ret < 0)
return -EIO;
return ret;
@@ -196,7 +236,11 @@ static int m25p_probe(struct spi_device *spi)
struct flash_platform_data *data;
struct m25p *flash;
struct spi_nor *nor;
- enum read_mode mode = SPI_NOR_NORMAL;
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
char *flash_name;
int ret;
@@ -221,10 +265,19 @@ static int m25p_probe(struct spi_device *spi)
spi_set_drvdata(spi, flash);
flash->spi = spi;
- if (spi->mode & SPI_RX_QUAD)
- mode = SPI_NOR_QUAD;
- else if (spi->mode & SPI_RX_DUAL)
- mode = SPI_NOR_DUAL;
+ if (spi->mode & SPI_RX_QUAD) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+
+ if (spi->mode & SPI_TX_QUAD)
+ hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
+ SNOR_HWCAPS_PP_1_1_4 |
+ SNOR_HWCAPS_PP_1_4_4);
+ } else if (spi->mode & SPI_RX_DUAL) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+
+ if (spi->mode & SPI_TX_DUAL)
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
+ }
if (data && data->name)
nor->mtd.name = data->name;
@@ -241,7 +294,7 @@ static int m25p_probe(struct spi_device *spi)
else
flash_name = spi->modalias;
- ret = spi_nor_scan(nor, flash_name, mode);
+ ret = spi_nor_scan(nor, flash_name, &hwcaps);
if (ret)
return ret;
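Note: the m25p80 changes above split the opcode and address phases into separate spi_transfers whenever they use different bus widths (e.g. 1-4-4 read ops), each with its own tx_nbits. A self-contained sketch of the split, with quad address lines assumed:

    #include <linux/spi/spi.h>

    static int send_op_addr_split(struct spi_device *spi, u8 *cmd, int addr_len)
    {
            struct spi_transfer t[2] = {};
            struct spi_message m;

            spi_message_init(&m);
            t[0].tx_buf = &cmd[0];
            t[0].tx_nbits = SPI_NBITS_SINGLE;       /* opcode on one line */
            t[0].len = 1;
            spi_message_add_tail(&t[0], &m);

            t[1].tx_buf = &cmd[1];
            t[1].tx_nbits = SPI_NBITS_QUAD;         /* address on four lines */
            t[1].len = addr_len;
            spi_message_add_tail(&t[1], &m);

            return spi_sync(spi, &m);
    }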
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
new file mode 100644
index 000000000000..8956b7dcc984
--- /dev/null
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -0,0 +1,236 @@
+/*
+ * mchp23k256.c
+ *
+ * Driver for Microchip 23k256 SPI RAM chips
+ *
+ * Copyright © 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/of_device.h>
+
+#define MAX_CMD_SIZE 4
+
+struct mchp23_caps {
+ u8 addr_width;
+ unsigned int size;
+};
+
+struct mchp23k256_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+ const struct mchp23_caps *caps;
+};
+
+#define MCHP23K256_CMD_WRITE_STATUS 0x01
+#define MCHP23K256_CMD_WRITE 0x02
+#define MCHP23K256_CMD_READ 0x03
+#define MCHP23K256_MODE_SEQ BIT(6)
+
+#define to_mchp23k256_flash(x) container_of(x, struct mchp23k256_flash, mtd)
+
+static void mchp23k256_addr2cmd(struct mchp23k256_flash *flash,
+ unsigned int addr, u8 *cmd)
+{
+ int i;
+
+ /*
+ * Address is sent in big endian (MSB first) and we skip
+ * the first entry of the cmd array which contains the cmd
+ * opcode.
+ */
+ for (i = flash->caps->addr_width; i > 0; i--, addr >>= 8)
+ cmd[i] = addr;
+}
+
+static int mchp23k256_cmdsz(struct mchp23k256_flash *flash)
+{
+ return 1 + flash->caps->addr_width;
+}
+
+static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd);
+ struct spi_transfer transfer[2] = {};
+ struct spi_message message;
+ unsigned char command[MAX_CMD_SIZE];
+
+ spi_message_init(&message);
+
+ command[0] = MCHP23K256_CMD_WRITE;
+ mchp23k256_addr2cmd(flash, to, command);
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = mchp23k256_cmdsz(flash);
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].tx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ spi_sync(flash->spi, &message);
+
+ if (retlen && message.actual_length > sizeof(command))
+ *retlen += message.actual_length - sizeof(command);
+
+ mutex_unlock(&flash->lock);
+ return 0;
+}
+
+static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd);
+ struct spi_transfer transfer[2] = {};
+ struct spi_message message;
+ unsigned char command[MAX_CMD_SIZE];
+
+ spi_message_init(&message);
+
+ memset(&transfer, 0, sizeof(transfer));
+ command[0] = MCHP23K256_CMD_READ;
+ mchp23k256_addr2cmd(flash, from, command);
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = mchp23k256_cmdsz(flash);
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ spi_sync(flash->spi, &message);
+
+ if (retlen && message.actual_length > sizeof(command))
+ *retlen += message.actual_length - sizeof(command);
+
+ mutex_unlock(&flash->lock);
+ return 0;
+}
+
+/*
+ * Set the device into sequential mode. This allows read/writes to the
+ * entire SRAM in a single operation.
+ */
+static int mchp23k256_set_mode(struct spi_device *spi)
+{
+ struct spi_transfer transfer = {};
+ struct spi_message message;
+ unsigned char command[2];
+
+ spi_message_init(&message);
+
+ command[0] = MCHP23K256_CMD_WRITE_STATUS;
+ command[1] = MCHP23K256_MODE_SEQ;
+
+ transfer.tx_buf = command;
+ transfer.len = sizeof(command);
+ spi_message_add_tail(&transfer, &message);
+
+ return spi_sync(spi, &message);
+}
+
+static const struct mchp23_caps mchp23k256_caps = {
+ .size = SZ_32K,
+ .addr_width = 2,
+};
+
+static const struct mchp23_caps mchp23lcv1024_caps = {
+ .size = SZ_128K,
+ .addr_width = 3,
+};
+
+static int mchp23k256_probe(struct spi_device *spi)
+{
+ struct mchp23k256_flash *flash;
+ struct flash_platform_data *data;
+ int err;
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ spi_set_drvdata(spi, flash);
+
+ err = mchp23k256_set_mode(spi);
+ if (err)
+ return err;
+
+ data = dev_get_platdata(&spi->dev);
+
+ flash->caps = of_device_get_match_data(&spi->dev);
+ if (!flash->caps)
+ flash->caps = &mchp23k256_caps;
+
+ mtd_set_of_node(&flash->mtd, spi->dev.of_node);
+ flash->mtd.dev.parent = &spi->dev;
+ flash->mtd.type = MTD_RAM;
+ flash->mtd.flags = MTD_CAP_RAM;
+ flash->mtd.writesize = 1;
+ flash->mtd.size = flash->caps->size;
+ flash->mtd._read = mchp23k256_read;
+ flash->mtd._write = mchp23k256_write;
+
+ err = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mchp23k256_remove(struct spi_device *spi)
+{
+ struct mchp23k256_flash *flash = spi_get_drvdata(spi);
+
+ return mtd_device_unregister(&flash->mtd);
+}
+
+static const struct of_device_id mchp23k256_of_table[] = {
+ {
+ .compatible = "microchip,mchp23k256",
+ .data = &mchp23k256_caps,
+ },
+ {
+ .compatible = "microchip,mchp23lcv1024",
+ .data = &mchp23lcv1024_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp23k256_of_table);
+
+static struct spi_driver mchp23k256_driver = {
+ .driver = {
+ .name = "mchp23k256",
+ .of_match_table = of_match_ptr(mchp23k256_of_table),
+ },
+ .probe = mchp23k256_probe,
+ .remove = mchp23k256_remove,
+};
+
+module_spi_driver(mchp23k256_driver);
+
+MODULE_DESCRIPTION("MTD SPI driver for MCHP23K256 RAM chips");
+MODULE_AUTHOR("Andrew Lunn <andre@lunn.ch>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:mchp23k256");
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index f9e9bd1cfaa0..5dc8bd042cc5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -82,9 +82,13 @@
#define OP_WRITE_SECURITY_REVC 0x9A
#define OP_WRITE_SECURITY 0x9B /* revision D */
+#define CFI_MFR_ATMEL 0x1F
+
+#define DATAFLASH_SHIFT_EXTID 24
+#define DATAFLASH_SHIFT_ID 40
struct dataflash {
- uint8_t command[4];
+ u8 command[4];
char name[24];
unsigned short page_offset; /* offset in flash address */
@@ -129,8 +133,7 @@ static int dataflash_waitready(struct spi_device *spi)
for (;;) {
status = dataflash_status(spi);
if (status < 0) {
- pr_debug("%s: status %d?\n",
- dev_name(&spi->dev), status);
+ dev_dbg(&spi->dev, "status %d?\n", status);
status = 0;
}
@@ -153,12 +156,11 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
struct spi_transfer x = { };
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
- uint8_t *command;
- uint32_t rem;
+ u8 *command;
+ u32 rem;
- pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
- dev_name(&spi->dev), (long long)instr->addr,
- (long long)instr->len);
+ dev_dbg(&spi->dev, "erase addr=0x%llx len 0x%llx\n",
+ (long long)instr->addr, (long long)instr->len);
div_u64_rem(instr->len, priv->page_size, &rem);
if (rem)
@@ -187,11 +189,11 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
pageaddr = pageaddr << priv->page_offset;
command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
- command[1] = (uint8_t)(pageaddr >> 16);
- command[2] = (uint8_t)(pageaddr >> 8);
+ command[1] = (u8)(pageaddr >> 16);
+ command[2] = (u8)(pageaddr >> 8);
command[3] = 0;
- pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
+ dev_dbg(&spi->dev, "ERASE %s: (%x) %x %x %x [%i]\n",
do_block ? "block" : "page",
command[0], command[1], command[2], command[3],
pageaddr);
@@ -200,8 +202,8 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
(void) dataflash_waitready(spi);
if (status < 0) {
- printk(KERN_ERR "%s: erase %x, err %d\n",
- dev_name(&spi->dev), pageaddr, status);
+ dev_err(&spi->dev, "erase %x, err %d\n",
+ pageaddr, status);
/* REVISIT: can retry instr->retries times; or
* giveup and instr->fail_addr = instr->addr;
*/
@@ -239,11 +241,11 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int addr;
- uint8_t *command;
+ u8 *command;
int status;
- pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
- (unsigned)from, (unsigned)(from + len));
+ dev_dbg(&priv->spi->dev, "read 0x%x..0x%x\n",
+ (unsigned int)from, (unsigned int)(from + len));
/* Calculate flash page/byte address */
addr = (((unsigned)from / priv->page_size) << priv->page_offset)
@@ -251,7 +253,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
command = priv->command;
- pr_debug("READ: (%x) %x %x %x\n",
+ dev_dbg(&priv->spi->dev, "READ: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
spi_message_init(&msg);
@@ -271,9 +273,9 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
* fewer "don't care" bytes. Both buffers stay unchanged.
*/
command[0] = OP_READ_CONTINUOUS;
- command[1] = (uint8_t)(addr >> 16);
- command[2] = (uint8_t)(addr >> 8);
- command[3] = (uint8_t)(addr >> 0);
+ command[1] = (u8)(addr >> 16);
+ command[2] = (u8)(addr >> 8);
+ command[3] = (u8)(addr >> 0);
/* plus 4 "don't care" bytes */
status = spi_sync(priv->spi, &msg);
@@ -283,8 +285,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
*retlen = msg.actual_length - 8;
status = 0;
} else
- pr_debug("%s: read %x..%x --> %d\n",
- dev_name(&priv->spi->dev),
+ dev_dbg(&priv->spi->dev, "read %x..%x --> %d\n",
(unsigned)from, (unsigned)(from + len),
status);
return status;
@@ -308,10 +309,10 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t remaining = len;
u_char *writebuf = (u_char *) buf;
int status = -EINVAL;
- uint8_t *command;
+ u8 *command;
- pr_debug("%s: write 0x%x..0x%x\n",
- dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
+ dev_dbg(&spi->dev, "write 0x%x..0x%x\n",
+ (unsigned int)to, (unsigned int)(to + len));
spi_message_init(&msg);
@@ -328,7 +329,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&priv->lock);
while (remaining > 0) {
- pr_debug("write @ %i:%i len=%i\n",
+ dev_dbg(&spi->dev, "write @ %i:%i len=%i\n",
pageaddr, offset, writelen);
/* REVISIT:
@@ -356,13 +357,13 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
- pr_debug("TRANSFER: (%x) %x %x %x\n",
+ dev_dbg(&spi->dev, "TRANSFER: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
- pr_debug("%s: xfer %u -> %d\n",
- dev_name(&spi->dev), addr, status);
+ dev_dbg(&spi->dev, "xfer %u -> %d\n",
+ addr, status);
(void) dataflash_waitready(priv->spi);
}
@@ -374,7 +375,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = (addr & 0x000000FF);
- pr_debug("PROGRAM: (%x) %x %x %x\n",
+ dev_dbg(&spi->dev, "PROGRAM: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
x[1].tx_buf = writebuf;
@@ -383,8 +384,8 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = spi_sync(spi, &msg);
spi_transfer_del(x + 1);
if (status < 0)
- pr_debug("%s: pgm %u/%u -> %d\n",
- dev_name(&spi->dev), addr, writelen, status);
+ dev_dbg(&spi->dev, "pgm %u/%u -> %d\n",
+ addr, writelen, status);
(void) dataflash_waitready(priv->spi);
@@ -398,20 +399,20 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
- pr_debug("COMPARE: (%x) %x %x %x\n",
+ dev_dbg(&spi->dev, "COMPARE: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
- pr_debug("%s: compare %u -> %d\n",
- dev_name(&spi->dev), addr, status);
+ dev_dbg(&spi->dev, "compare %u -> %d\n",
+ addr, status);
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
if (status & (1 << 6)) {
- printk(KERN_ERR "%s: compare page %u, err %d\n",
- dev_name(&spi->dev), pageaddr, status);
+ dev_err(&spi->dev, "compare page %u, err %d\n",
+ pageaddr, status);
remaining = 0;
status = -EIO;
break;
@@ -455,11 +456,11 @@ static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len,
}
static ssize_t otp_read(struct spi_device *spi, unsigned base,
- uint8_t *buf, loff_t off, size_t len)
+ u8 *buf, loff_t off, size_t len)
{
struct spi_message m;
size_t l;
- uint8_t *scratch;
+ u8 *scratch;
struct spi_transfer t;
int status;
@@ -538,7 +539,7 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
{
struct spi_message m;
const size_t l = 4 + 64;
- uint8_t *scratch;
+ u8 *scratch;
struct spi_transfer t;
struct dataflash *priv = mtd->priv;
int status;
@@ -689,14 +690,15 @@ struct flash_info {
/* JEDEC id has a high byte of zero plus three data bytes:
* the manufacturer id, then a two byte device id.
*/
- uint32_t jedec_id;
+ u64 jedec_id;
/* The size listed here is what works with OP_ERASE_PAGE. */
unsigned nr_pages;
- uint16_t pagesize;
- uint16_t pageoffset;
+ u16 pagesize;
+ u16 pageoffset;
- uint16_t flags;
+ u16 flags;
+#define SUP_EXTID 0x0004 /* supports extended ID data */
#define SUP_POW2PS 0x0002 /* supports 2^N byte pages */
#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
};
@@ -734,54 +736,32 @@ static struct flash_info dataflash_data[] = {
{ "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+ { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
};
-static struct flash_info *jedec_probe(struct spi_device *spi)
+static struct flash_info *jedec_lookup(struct spi_device *spi,
+ u64 jedec, bool use_extid)
{
- int tmp;
- uint8_t code = OP_READ_ID;
- uint8_t id[3];
- uint32_t jedec;
- struct flash_info *info;
+ struct flash_info *info;
int status;
- /* JEDEC also defines an optional "extended device information"
- * string for after vendor-specific data, after the three bytes
- * we use here. Supporting some chips might require using it.
- *
- * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
- * That's not an error; only rev C and newer chips handle it, and
- * only Atmel sells these chips.
- */
- tmp = spi_write_then_read(spi, &code, 1, id, 3);
- if (tmp < 0) {
- pr_debug("%s: error %d reading JEDEC ID\n",
- dev_name(&spi->dev), tmp);
- return ERR_PTR(tmp);
- }
- if (id[0] != 0x1f)
- return NULL;
-
- jedec = id[0];
- jedec = jedec << 8;
- jedec |= id[1];
- jedec = jedec << 8;
- jedec |= id[2];
+ for (info = dataflash_data;
+ info < dataflash_data + ARRAY_SIZE(dataflash_data);
+ info++) {
+ if (use_extid && !(info->flags & SUP_EXTID))
+ continue;
- for (tmp = 0, info = dataflash_data;
- tmp < ARRAY_SIZE(dataflash_data);
- tmp++, info++) {
if (info->jedec_id == jedec) {
- pr_debug("%s: OTP, sector protect%s\n",
- dev_name(&spi->dev),
- (info->flags & SUP_POW2PS)
- ? ", binary pagesize" : ""
- );
+ dev_dbg(&spi->dev, "OTP, sector protect%s\n",
+ (info->flags & SUP_POW2PS) ?
+ ", binary pagesize" : "");
if (info->flags & SUP_POW2PS) {
status = dataflash_status(spi);
if (status < 0) {
- pr_debug("%s: status error %d\n",
- dev_name(&spi->dev), status);
+ dev_dbg(&spi->dev, "status error %d\n",
+ status);
return ERR_PTR(status);
}
if (status & 0x1) {
@@ -796,12 +776,58 @@ static struct flash_info *jedec_probe(struct spi_device *spi)
}
}
+ return ERR_PTR(-ENODEV);
+}
+
+static struct flash_info *jedec_probe(struct spi_device *spi)
+{
+ int ret;
+ u8 code = OP_READ_ID;
+ u64 jedec;
+ u8 id[sizeof(jedec)] = {0};
+ const unsigned int id_size = 5;
+ struct flash_info *info;
+
+ /*
+ * JEDEC also defines an optional "extended device information"
+ * string for after vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ *
+ * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
+ * That's not an error; only rev C and newer chips handle it, and
+ * only Atmel sells these chips.
+ */
+ ret = spi_write_then_read(spi, &code, 1, id, id_size);
+ if (ret < 0) {
+ dev_dbg(&spi->dev, "error %d reading JEDEC ID\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ if (id[0] != CFI_MFR_ATMEL)
+ return NULL;
+
+ jedec = be64_to_cpup((__be64 *)id);
+
+ /*
+ * First, try to match device using extended device
+ * information
+ */
+ info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_EXTID, true);
+ if (!IS_ERR(info))
+ return info;
+ /*
+ * If that fails, make another pass using regular ID
+ * information
+ */
+ info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_ID, false);
+ if (!IS_ERR(info))
+ return info;
/*
* Treat other chips as errors ... we won't know the right page
* size (it might be binary) even when we can tell which density
* class is involved (legacy chip id scheme).
*/
- dev_warn(&spi->dev, "JEDEC id %06x not handled\n", jedec);
+ dev_warn(&spi->dev, "JEDEC id %016llx not handled\n", jedec);
return ERR_PTR(-ENODEV);
}
@@ -845,8 +871,7 @@ static int dataflash_probe(struct spi_device *spi)
*/
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
- pr_debug("%s: status error %d\n",
- dev_name(&spi->dev), status);
+ dev_dbg(&spi->dev, "status error %d\n", status);
if (status == 0 || status == 0xff)
status = -ENODEV;
return status;
@@ -887,8 +912,7 @@ static int dataflash_probe(struct spi_device *spi)
}
if (status < 0)
- pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev),
- status);
+ dev_dbg(&spi->dev, "add_dataflash --> %d\n", status);
return status;
}
@@ -898,7 +922,7 @@ static int dataflash_remove(struct spi_device *spi)
struct dataflash *flash = spi_get_drvdata(spi);
int status;
- pr_debug("%s: remove\n", dev_name(&spi->dev));
+ dev_dbg(&spi->dev, "remove\n");
status = mtd_device_unregister(&flash->mtd);
if (status == 0)
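Note: jedec_probe() above reads five ID bytes into the top of a zeroed 8-byte buffer, treats the whole buffer as one big-endian u64, and the two shift constants then select the extended (5-byte) or classic (3-byte) JEDEC id. Worked example for the AT45DB641E:

    u8 id[8] = { 0x1f, 0x28, 0x00, 0x01, 0x00 };    /* plus 3 zero bytes */
    u64 jedec = be64_to_cpup((const __be64 *)id);

    /* jedec >> DATAFLASH_SHIFT_EXTID (24) == 0x1f28000100  extended match */
    /* jedec >> DATAFLASH_SHIFT_ID    (40) == 0x1f2800      classic match  */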
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
index 8b81e15105dd..eba125c9f23f 100644
--- a/drivers/mtd/devices/serial_flash_cmds.h
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -13,7 +13,6 @@
#define _MTD_SERIAL_FLASH_CMDS_H
/* Generic Flash Commands/OPCODEs */
-#define SPINOR_OP_RDSR2 0x35
#define SPINOR_OP_WRVCR 0x81
#define SPINOR_OP_RDVCR 0x85
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 804313a33f2b..21afd94cd904 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -1445,7 +1445,7 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
}
/* Check status of 'QE' bit, update if required. */
- stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1);
+ stfsm_read_status(fsm, SPINOR_OP_RDCR, &cr1, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
@@ -1490,7 +1490,7 @@ static int stfsm_w25q_config(struct stfsm *fsm)
return ret;
/* Check status of 'QE' bit, update if required. */
- stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1);
+ stfsm_read_status(fsm, SPINOR_OP_RDCR, &sr2, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
if (!(sr2 & W25Q_STATUS_QE)) {
diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c
index 9d371cd728ea..05b286b5289f 100644
--- a/drivers/mtd/maps/physmap_of_gemini.c
+++ b/drivers/mtd/maps/physmap_of_gemini.c
@@ -59,7 +59,7 @@ int of_flash_probe_gemini(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
{
- static struct regmap *rmap;
+ struct regmap *rmap;
struct device *dev = &pdev->dev;
u32 val;
int ret;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 1517da3ddd7d..956382cea256 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -991,7 +991,7 @@ EXPORT_SYMBOL_GPL(mtd_point);
/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_point)
+ if (!mtd->_unpoint)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
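
The one-character fix above matters because callers legitimately probe for
point support before mapping. A hypothetical caller (variables `from`, `len`,
and `buf` assumed in scope; `use_mapping()` is a placeholder consumer):

/* Map a region for direct access, fall back to mtd_read() when
 * pointing is unsupported, and always unpoint a successful mapping. */
size_t retlen;
void *virt;

if (!mtd_point(mtd, from, len, &retlen, &virt, NULL)) {
	use_mapping(virt, retlen);
	mtd_unpoint(mtd, from, retlen);
} else {
	mtd_read(mtd, from, len, &retlen, buf);
}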
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index ea5e5307f667..5736b0c90b33 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -37,10 +37,16 @@
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
-/* Our partition node structure */
+/**
+ * struct mtd_part - our partition node structure
+ *
+ * @mtd: struct holding partition details
+ * @parent: parent mtd - flash device or another partition
+ * @offset: partition offset relative to the *flash device*
+ */
struct mtd_part {
struct mtd_info mtd;
- struct mtd_info *master;
+ struct mtd_info *parent;
uint64_t offset;
struct list_head list;
};
@@ -67,15 +73,15 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
struct mtd_ecc_stats stats;
int res;
- stats = part->master->ecc_stats;
- res = part->master->_read(part->master, from + part->offset, len,
+ stats = part->parent->ecc_stats;
+ res = part->parent->_read(part->parent, from + part->offset, len,
retlen, buf);
if (unlikely(mtd_is_eccerr(res)))
mtd->ecc_stats.failed +=
- part->master->ecc_stats.failed - stats.failed;
+ part->parent->ecc_stats.failed - stats.failed;
else
mtd->ecc_stats.corrected +=
- part->master->ecc_stats.corrected - stats.corrected;
+ part->parent->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -84,7 +90,7 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_point(part->master, from + part->offset, len,
+ return part->parent->_point(part->parent, from + part->offset, len,
retlen, virt, phys);
}
@@ -92,7 +98,7 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_unpoint(part->master, from + part->offset, len);
+ return part->parent->_unpoint(part->parent, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -103,7 +109,7 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = mtd_to_part(mtd);
offset += part->offset;
- return part->master->_get_unmapped_area(part->master, len, offset,
+ return part->parent->_get_unmapped_area(part->parent, len, offset,
flags);
}
@@ -132,7 +138,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = part->master->_read_oob(part->master, from + part->offset, ops);
+ res = part->parent->_read_oob(part->parent, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
@@ -146,7 +152,7 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_read_user_prot_reg(part->master, from, len,
+ return part->parent->_read_user_prot_reg(part->parent, from, len,
retlen, buf);
}
@@ -154,7 +160,7 @@ static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_get_user_prot_info(part->master, len, retlen,
+ return part->parent->_get_user_prot_info(part->parent, len, retlen,
buf);
}
@@ -162,7 +168,7 @@ static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_read_fact_prot_reg(part->master, from, len,
+ return part->parent->_read_fact_prot_reg(part->parent, from, len,
retlen, buf);
}
@@ -170,7 +176,7 @@ static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_get_fact_prot_info(part->master, len, retlen,
+ return part->parent->_get_fact_prot_info(part->parent, len, retlen,
buf);
}
@@ -178,7 +184,7 @@ static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_write(part->master, to + part->offset, len,
+ return part->parent->_write(part->parent, to + part->offset, len,
retlen, buf);
}
@@ -186,7 +192,7 @@ static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_panic_write(part->master, to + part->offset, len,
+ return part->parent->_panic_write(part->parent, to + part->offset, len,
retlen, buf);
}
@@ -199,14 +205,14 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return part->master->_write_oob(part->master, to + part->offset, ops);
+ return part->parent->_write_oob(part->parent, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_write_user_prot_reg(part->master, from, len,
+ return part->parent->_write_user_prot_reg(part->parent, from, len,
retlen, buf);
}
@@ -214,14 +220,14 @@ static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_lock_user_prot_reg(part->master, from, len);
+ return part->parent->_lock_user_prot_reg(part->parent, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_writev(part->master, vecs, count,
+ return part->parent->_writev(part->parent, vecs, count,
to + part->offset, retlen);
}
@@ -231,7 +237,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
int ret;
instr->addr += part->offset;
- ret = part->master->_erase(part->master, instr);
+ ret = part->parent->_erase(part->parent, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
@@ -257,51 +263,51 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_lock(part->master, ofs + part->offset, len);
+ return part->parent->_lock(part->parent, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_unlock(part->master, ofs + part->offset, len);
+ return part->parent->_unlock(part->parent, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_is_locked(part->master, ofs + part->offset, len);
+ return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = mtd_to_part(mtd);
- part->master->_sync(part->master);
+ part->parent->_sync(part->parent);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_suspend(part->master);
+ return part->parent->_suspend(part->parent);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = mtd_to_part(mtd);
- part->master->_resume(part->master);
+ part->parent->_resume(part->parent);
}
static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = mtd_to_part(mtd);
ofs += part->offset;
- return part->master->_block_isreserved(part->master, ofs);
+ return part->parent->_block_isreserved(part->parent, ofs);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = mtd_to_part(mtd);
ofs += part->offset;
- return part->master->_block_isbad(part->master, ofs);
+ return part->parent->_block_isbad(part->parent, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -310,7 +316,7 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
int res;
ofs += part->offset;
- res = part->master->_block_markbad(part->master, ofs);
+ res = part->parent->_block_markbad(part->parent, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
@@ -319,13 +325,13 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
static int part_get_device(struct mtd_info *mtd)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_get_device(part->master);
+ return part->parent->_get_device(part->parent);
}
static void part_put_device(struct mtd_info *mtd)
{
struct mtd_part *part = mtd_to_part(mtd);
- part->master->_put_device(part->master);
+ part->parent->_put_device(part->parent);
}
static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
@@ -333,7 +339,7 @@ static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
{
struct mtd_part *part = mtd_to_part(mtd);
- return mtd_ooblayout_ecc(part->master, section, oobregion);
+ return mtd_ooblayout_ecc(part->parent, section, oobregion);
}
static int part_ooblayout_free(struct mtd_info *mtd, int section,
@@ -341,7 +347,7 @@ static int part_ooblayout_free(struct mtd_info *mtd, int section,
{
struct mtd_part *part = mtd_to_part(mtd);
- return mtd_ooblayout_free(part->master, section, oobregion);
+ return mtd_ooblayout_free(part->parent, section, oobregion);
}
static const struct mtd_ooblayout_ops part_ooblayout_ops = {
@@ -353,7 +359,7 @@ static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_part *part = mtd_to_part(mtd);
- return part->master->_max_bad_blocks(part->master,
+ return part->parent->_max_bad_blocks(part->parent,
ofs + part->offset, len);
}
@@ -363,63 +369,70 @@ static inline void free_partition(struct mtd_part *p)
kfree(p);
}
-/*
- * This function unregisters and destroy all slave MTD objects which are
- * attached to the given master MTD object.
+/**
+ * mtd_parse_part - parse MTD partition looking for subpartitions
+ *
+ * @slave: part that is supposed to be a container and should be parsed
+ * @types: NULL-terminated array with names of partition parsers to try
+ *
+ * Some partitions are containers with extra subpartitions (volumes). Such
+ * containers come in various formats. This function tries the specified
+ * parsers on the given partition and, on success, registers the
+ * subpartitions they find.
*/
-
-int del_mtd_partitions(struct mtd_info *master)
+static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
{
- struct mtd_part *slave, *next;
- int ret, err = 0;
+ struct mtd_partitions parsed;
+ int err;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if (slave->master == master) {
- ret = del_mtd_device(&slave->mtd);
- if (ret < 0) {
- err = ret;
- continue;
- }
- list_del(&slave->list);
- free_partition(slave);
- }
- mutex_unlock(&mtd_partitions_mutex);
+ err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
+ if (err)
+ return err;
+ else if (!parsed.nr_parts)
+ return -ENOENT;
+
+ err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);
+
+ mtd_part_parser_cleanup(&parsed);
return err;
}
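
A hedged sketch of the new container mechanism from the registration side: a
board file would mark a partition as a container by giving it a parser list in
the new mtd_partition ->types field (the "trx" parser name here is purely
illustrative):

/* Illustrative only: "firmware" is treated as a container and handed
 * to the listed parsers when add_mtd_partitions() registers it. */
static const char * const sub_parsers[] = { "trx", NULL };

static const struct mtd_partition board_parts[] = {
	{
		.name	= "firmware",
		.offset	= 0,
		.size	= MTDPART_SIZ_FULL,
		.types	= sub_parsers,
	},
};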
-static struct mtd_part *allocate_partition(struct mtd_info *master,
+static struct mtd_part *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part, int partno,
uint64_t cur_offset)
{
+ int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
+ parent->erasesize;
struct mtd_part *slave;
+ u32 remainder;
char *name;
+ u64 tmp;
/* allocate the partition structure */
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
name = kstrdup(part->name, GFP_KERNEL);
if (!name || !slave) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
- master->name);
+ parent->name);
kfree(name);
kfree(slave);
return ERR_PTR(-ENOMEM);
}
/* set up the MTD object for this partition */
- slave->mtd.type = master->type;
- slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.type = parent->type;
+ slave->mtd.flags = parent->flags & ~part->mask_flags;
slave->mtd.size = part->size;
- slave->mtd.writesize = master->writesize;
- slave->mtd.writebufsize = master->writebufsize;
- slave->mtd.oobsize = master->oobsize;
- slave->mtd.oobavail = master->oobavail;
- slave->mtd.subpage_sft = master->subpage_sft;
- slave->mtd.pairing = master->pairing;
+ slave->mtd.writesize = parent->writesize;
+ slave->mtd.writebufsize = parent->writebufsize;
+ slave->mtd.oobsize = parent->oobsize;
+ slave->mtd.oobavail = parent->oobavail;
+ slave->mtd.subpage_sft = parent->subpage_sft;
+ slave->mtd.pairing = parent->pairing;
slave->mtd.name = name;
- slave->mtd.owner = master->owner;
+ slave->mtd.owner = parent->owner;
/* NOTE: Historically, we didn't arrange MTDs as a tree out of
* concern for showing the same data in multiple partitions.
@@ -429,80 +442,81 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
* parent conditional on that option. Note, this is a way to
* distinguish between the master and the partition in sysfs.
*/
- slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
- &master->dev :
- master->dev.parent;
+ slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+ &parent->dev :
+ parent->dev.parent;
slave->mtd.dev.of_node = part->of_node;
slave->mtd._read = part_read;
slave->mtd._write = part_write;
- if (master->_panic_write)
+ if (parent->_panic_write)
slave->mtd._panic_write = part_panic_write;
- if (master->_point && master->_unpoint) {
+ if (parent->_point && parent->_unpoint) {
slave->mtd._point = part_point;
slave->mtd._unpoint = part_unpoint;
}
- if (master->_get_unmapped_area)
+ if (parent->_get_unmapped_area)
slave->mtd._get_unmapped_area = part_get_unmapped_area;
- if (master->_read_oob)
+ if (parent->_read_oob)
slave->mtd._read_oob = part_read_oob;
- if (master->_write_oob)
+ if (parent->_write_oob)
slave->mtd._write_oob = part_write_oob;
- if (master->_read_user_prot_reg)
+ if (parent->_read_user_prot_reg)
slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
- if (master->_read_fact_prot_reg)
+ if (parent->_read_fact_prot_reg)
slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
- if (master->_write_user_prot_reg)
+ if (parent->_write_user_prot_reg)
slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
- if (master->_lock_user_prot_reg)
+ if (parent->_lock_user_prot_reg)
slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
- if (master->_get_user_prot_info)
+ if (parent->_get_user_prot_info)
slave->mtd._get_user_prot_info = part_get_user_prot_info;
- if (master->_get_fact_prot_info)
+ if (parent->_get_fact_prot_info)
slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
- if (master->_sync)
+ if (parent->_sync)
slave->mtd._sync = part_sync;
- if (!partno && !master->dev.class && master->_suspend &&
- master->_resume) {
- slave->mtd._suspend = part_suspend;
- slave->mtd._resume = part_resume;
+ if (!partno && !parent->dev.class && parent->_suspend &&
+ parent->_resume) {
+ slave->mtd._suspend = part_suspend;
+ slave->mtd._resume = part_resume;
}
- if (master->_writev)
+ if (parent->_writev)
slave->mtd._writev = part_writev;
- if (master->_lock)
+ if (parent->_lock)
slave->mtd._lock = part_lock;
- if (master->_unlock)
+ if (parent->_unlock)
slave->mtd._unlock = part_unlock;
- if (master->_is_locked)
+ if (parent->_is_locked)
slave->mtd._is_locked = part_is_locked;
- if (master->_block_isreserved)
+ if (parent->_block_isreserved)
slave->mtd._block_isreserved = part_block_isreserved;
- if (master->_block_isbad)
+ if (parent->_block_isbad)
slave->mtd._block_isbad = part_block_isbad;
- if (master->_block_markbad)
+ if (parent->_block_markbad)
slave->mtd._block_markbad = part_block_markbad;
- if (master->_max_bad_blocks)
+ if (parent->_max_bad_blocks)
slave->mtd._max_bad_blocks = part_max_bad_blocks;
- if (master->_get_device)
+ if (parent->_get_device)
slave->mtd._get_device = part_get_device;
- if (master->_put_device)
+ if (parent->_put_device)
slave->mtd._put_device = part_put_device;
slave->mtd._erase = part_erase;
- slave->master = master;
+ slave->parent = parent;
slave->offset = part->offset;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
+ tmp = cur_offset;
slave->offset = cur_offset;
- if (mtd_mod_by_eb(cur_offset, master) != 0) {
- /* Round up to next erasesize */
- slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ remainder = do_div(tmp, wr_alignment);
+ if (remainder) {
+ slave->offset += wr_alignment - remainder;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
@@ -510,25 +524,25 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
}
if (slave->offset == MTDPART_OFS_RETAIN) {
slave->offset = cur_offset;
- if (master->size - slave->offset >= slave->mtd.size) {
- slave->mtd.size = master->size - slave->offset
+ if (parent->size - slave->offset >= slave->mtd.size) {
+ slave->mtd.size = parent->size - slave->offset
- slave->mtd.size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
- part->name, master->size - slave->offset,
+ part->name, parent->size - slave->offset,
slave->mtd.size);
/* register to preserve ordering */
goto out_register;
}
}
if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = master->size - slave->offset;
+ slave->mtd.size = parent->size - slave->offset;
printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
/* let's do some sanity checks */
- if (slave->offset >= master->size) {
+ if (slave->offset >= parent->size) {
/* let's register it anyway to preserve ordering */
slave->offset = 0;
slave->mtd.size = 0;
@@ -536,16 +550,16 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
part->name);
goto out_register;
}
- if (slave->offset + slave->mtd.size > master->size) {
- slave->mtd.size = master->size - slave->offset;
+ if (slave->offset + slave->mtd.size > parent->size) {
+ slave->mtd.size = parent->size - slave->offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
- part->name, master->name, (unsigned long long)slave->mtd.size);
+ part->name, parent->name, (unsigned long long)slave->mtd.size);
}
- if (master->numeraseregions > 1) {
+ if (parent->numeraseregions > 1) {
/* Deal with variable erase size stuff */
- int i, max = master->numeraseregions;
+ int i, max = parent->numeraseregions;
u64 end = slave->offset + slave->mtd.size;
- struct mtd_erase_region_info *regions = master->eraseregions;
+ struct mtd_erase_region_info *regions = parent->eraseregions;
/* Find the first erase regions which is part of this
* partition. */
@@ -564,37 +578,40 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
BUG_ON(slave->mtd.erasesize == 0);
} else {
/* Single erase size */
- slave->mtd.erasesize = master->erasesize;
+ slave->mtd.erasesize = parent->erasesize;
}
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+ tmp = slave->offset;
+ remainder = do_div(tmp, wr_alignment);
+ if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
part->name);
}
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+
+ tmp = slave->mtd.size;
+ remainder = do_div(tmp, wr_alignment);
+ if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
part->name);
}
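
The repeated tmp/do_div() dance above is the portable way to take a 64-bit
remainder in the kernel; a condensed sketch of the pattern:

/* do_div() divides a u64 in place (the dividend becomes the quotient)
 * and returns the 32-bit remainder; a plain '%' on u64 would not link
 * on 32-bit architectures, which is why the code avoids it. */
u64 tmp = slave->offset;
u32 remainder = do_div(tmp, wr_alignment);

if (remainder)
	pr_info("offset not aligned to the erase/write block size\n");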
mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
- slave->mtd.ecc_step_size = master->ecc_step_size;
- slave->mtd.ecc_strength = master->ecc_strength;
- slave->mtd.bitflip_threshold = master->bitflip_threshold;
+ slave->mtd.ecc_step_size = parent->ecc_step_size;
+ slave->mtd.ecc_strength = parent->ecc_strength;
+ slave->mtd.bitflip_threshold = parent->bitflip_threshold;
- if (master->_block_isbad) {
+ if (parent->_block_isbad) {
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (mtd_block_isreserved(master, offs + slave->offset))
+ if (mtd_block_isreserved(parent, offs + slave->offset))
slave->mtd.ecc_stats.bbtblocks++;
- else if (mtd_block_isbad(master, offs + slave->offset))
+ else if (mtd_block_isbad(parent, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
@@ -628,7 +645,7 @@ static int mtd_add_partition_attrs(struct mtd_part *new)
return ret;
}
-int mtd_add_partition(struct mtd_info *master, const char *name,
+int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
@@ -641,7 +658,7 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
return -EINVAL;
if (length == MTDPART_SIZ_FULL)
- length = master->size - offset;
+ length = parent->size - offset;
if (length <= 0)
return -EINVAL;
@@ -651,7 +668,7 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
part.size = length;
part.offset = offset;
- new = allocate_partition(master, &part, -1, offset);
+ new = allocate_partition(parent, &part, -1, offset);
if (IS_ERR(new))
return PTR_ERR(new);
@@ -667,23 +684,69 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
-int mtd_del_partition(struct mtd_info *master, int partno)
+/**
+ * __mtd_del_partition - delete MTD partition
+ *
+ * @priv: internal MTD struct for partition to be deleted
+ *
+ * This function must be called with the partitions mutex locked.
+ */
+static int __mtd_del_partition(struct mtd_part *priv)
+{
+ struct mtd_part *child, *next;
+ int err;
+
+ list_for_each_entry_safe(child, next, &mtd_partitions, list) {
+ if (child->parent == &priv->mtd) {
+ err = __mtd_del_partition(child);
+ if (err)
+ return err;
+ }
+ }
+
+ sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
+
+ err = del_mtd_device(&priv->mtd);
+ if (err)
+ return err;
+
+ list_del(&priv->list);
+ free_partition(priv);
+
+ return 0;
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
+ * attached to the given MTD object.
+ */
+int del_mtd_partitions(struct mtd_info *mtd)
{
struct mtd_part *slave, *next;
- int ret = -EINVAL;
+ int ret, err = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if ((slave->master == master) &&
- (slave->mtd.index == partno)) {
- sysfs_remove_files(&slave->mtd.dev.kobj,
- mtd_partition_attrs);
- ret = del_mtd_device(&slave->mtd);
+ if (slave->parent == mtd) {
+ ret = __mtd_del_partition(slave);
if (ret < 0)
- break;
+ err = ret;
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return err;
+}
+
+int mtd_del_partition(struct mtd_info *mtd, int partno)
+{
+ struct mtd_part *slave, *next;
+ int ret = -EINVAL;
- list_del(&slave->list);
- free_partition(slave);
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if ((slave->parent == mtd) &&
+ (slave->mtd.index == partno)) {
+ ret = __mtd_del_partition(slave);
break;
}
mutex_unlock(&mtd_partitions_mutex);
@@ -724,6 +787,8 @@ int add_mtd_partitions(struct mtd_info *master,
add_mtd_device(&slave->mtd);
mtd_add_partition_attrs(slave);
+ if (parts[i].types)
+ mtd_parse_part(slave, parts[i].types);
cur_offset = slave->offset + slave->mtd.size;
}
@@ -799,6 +864,27 @@ static const char * const default_mtd_part_types[] = {
NULL
};
+static int mtd_part_do_parse(struct mtd_part_parser *parser,
+ struct mtd_info *master,
+ struct mtd_partitions *pparts,
+ struct mtd_part_parser_data *data)
+{
+ int ret;
+
+ ret = (*parser->parse_fn)(master, &pparts->parts, data);
+ pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
+ if (ret <= 0)
+ return ret;
+
+ pr_notice("%d %s partitions found on MTD device %s\n", ret,
+ parser->name, master->name);
+
+ pparts->nr_parts = ret;
+ pparts->parser = parser;
+
+ return ret;
+}
+
/**
* parse_mtd_partitions - parse MTD partitions
* @master: the master partition (describes whole MTD device)
@@ -839,16 +925,10 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
parser ? parser->name : NULL);
if (!parser)
continue;
- ret = (*parser->parse_fn)(master, &pparts->parts, data);
- pr_debug("%s: parser %s: %i\n",
- master->name, parser->name, ret);
- if (ret > 0) {
- printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
- ret, parser->name, master->name);
- pparts->nr_parts = ret;
- pparts->parser = parser;
+ ret = mtd_part_do_parse(parser, master, pparts, data);
+ /* Found partitions! */
+ if (ret > 0)
return 0;
- }
mtd_part_parser_put(parser);
/*
* Stash the first error we see; only report it if no parser
@@ -899,6 +979,6 @@ uint64_t mtd_get_device_size(const struct mtd_info *mtd)
if (!mtd_is_partition(mtd))
return mtd->size;
- return mtd_to_part(mtd)->master->size;
+ return mtd_get_device_size(mtd_to_part(mtd)->parent);
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
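
Since ->parent may now itself be a partition, the size query recurses until it
reaches the flash device. An equivalent iterative form, shown here only to
illustrate the change (helper name hypothetical):

/* Hypothetical helper: walk the partition chain up to the root device. */
static struct mtd_info *mtd_get_root_device(struct mtd_info *mtd)
{
	while (mtd_is_partition(mtd))
		mtd = mtd_to_part(mtd)->parent;
	return mtd;
}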
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c3029528063b..dbfa72d61d5a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -308,6 +308,7 @@ config MTD_NAND_CS553X
config MTD_NAND_ATMEL
tristate "Support for NAND Flash / SmartMedia on AT91"
depends on ARCH_AT91
+ select MFD_ATMEL_SMC
help
Enables support for NAND Flash / Smart Media Card interface
on Atmel AT91 processors.
@@ -542,6 +543,7 @@ config MTD_NAND_SUNXI
config MTD_NAND_HISI504
tristate "Support for NAND controller on Hisilicon SoC Hip04"
+ depends on ARCH_HISI || COMPILE_TEST
depends on HAS_DMA
help
Enables support for NAND controller on Hisilicon SoC Hip04.
@@ -555,6 +557,7 @@ config MTD_NAND_QCOM
config MTD_NAND_MTK
tristate "Support for NAND controller on MTK SoCs"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_DMA
help
Enables support for NAND controller on MTK SoCs.
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 3b2446896147..d922a88e407f 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -57,6 +57,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/atmel-matrix.h>
+#include <linux/mfd/syscon/atmel-smc.h>
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/of_address.h>
@@ -64,7 +65,6 @@
#include <linux/of_platform.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/atmel.h>
#include <linux/regmap.h>
#include "pmecc.h"
@@ -151,6 +151,8 @@ struct atmel_nand_cs {
void __iomem *virt;
dma_addr_t dma;
} io;
+
+ struct atmel_smc_cs_conf smcconf;
};
struct atmel_nand {
@@ -196,6 +198,8 @@ struct atmel_nand_controller_ops {
void (*nand_init)(struct atmel_nand_controller *nc,
struct atmel_nand *nand);
int (*ecc_init)(struct atmel_nand *nand);
+ int (*setup_data_interface)(struct atmel_nand *nand, int csline,
+ const struct nand_data_interface *conf);
};
struct atmel_nand_controller_caps {
@@ -912,7 +916,7 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
- int ret;
+ int ret, status;
nc = to_hsmc_nand_controller(chip->controller);
@@ -954,6 +958,10 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
ret);
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return ret;
}
@@ -1175,6 +1183,295 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
return 0;
}
+static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
+ const struct nand_data_interface *conf,
+ struct atmel_smc_cs_conf *smcconf)
+{
+ u32 ncycles, totalcycles, timeps, mckperiodps;
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /* DDR interface not supported. */
+ if (conf->type != NAND_SDR_IFACE)
+ return -ENOTSUPP;
+
+ /*
+ * tRC < 30ns implies EDO mode. This controller does not support this
+ * mode.
+ */
+ if (conf->timings.sdr.tRC_min < 30)
+ return -ENOTSUPP;
+
+ atmel_smc_cs_conf_init(smcconf);
+
+ mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
+ mckperiodps *= 1000;
+
+ /*
+ * Set write pulse timing. This one is easy to extract:
+ *
+ * NWE_PULSE = tWP
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
+ totalcycles = ncycles;
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+	 * The write setup timing depends on the operation done on the NAND.
+	 * All operations go through the same data bus, but the operation
+	 * type depends on the address being written to (ALE/CLE address
+	 * lines).
+	 * Since we have no way to distinguish the operation types at the
+	 * SMC level, we must consider the worst case (the longest setup
+	 * time among all operation types):
+ *
+ * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
+ */
+ timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
+ conf->timings.sdr.tALS_min);
+ timeps = max(timeps, conf->timings.sdr.tDS_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+	 * As with the write setup timing, the write hold timing depends on the
+ * operation done on the NAND:
+ *
+ * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
+ */
+ timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
+ conf->timings.sdr.tALH_min);
+ timeps = max3(timeps, conf->timings.sdr.tDH_min,
+ conf->timings.sdr.tWH_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles += ncycles;
+
+ /*
+	 * The write cycle timing directly matches tWC, but it also depends
+	 * on the setup and hold timings we calculated earlier, which gives:
+ *
+ * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
+ ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the CS line to be toggled between each byte/word
+ * transfer to the NAND. The only way to guarantee that is to have the
+ * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
+ *
+ * NCS_WR_PULSE = NWE_CYCLE
+ */
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+	 * As with the write setup timing, the read hold timing depends on the
+ * operation done on the NAND:
+ *
+ * NRD_HOLD = max(tREH, tRHOH)
+ */
+ timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles = ncycles;
+
+ /*
+ * TDF = tRHZ - NRD_HOLD
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
+ ncycles -= totalcycles;
+
+ /*
+	 * In the ONFI 4.0 spec, tRHZ was increased to support EDO NANDs, and
+	 * we might end up with a config that does not fit in the TDF field.
+ * Just take the max value in this case and hope that the NAND is more
+ * tolerant than advertised.
+ */
+ if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
+ ncycles = ATMEL_SMC_MODE_TDF_MAX;
+ else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+ ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
+ smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
+ ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
+
+ /*
+ * Read pulse timing directly matches tRP:
+ *
+ * NRD_PULSE = tRP
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+	 * The read cycle timing directly matches tRC, but it also depends
+	 * on the setup and hold timings we calculated earlier,
+ * which gives:
+ *
+ * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+ *
+ * NRD_SETUP is always 0.
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
+ ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the CS line to be toggled between each byte/word
+ * transfer from the NAND. The only way to guarantee that is to have
+ * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
+ *
+ * NCS_RD_PULSE = NRD_CYCLE
+ */
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+	/* The Txxx timing registers directly match their tXXX counterparts. */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TADL_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TAR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TRR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TWB_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /* Attach the CS line to the NFC logic. */
+ smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
+
+ /* Set the appropriate data bus width. */
+ if (nand->base.options & NAND_BUSWIDTH_16)
+ smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
+
+ /* Operate in NRD/NWE READ/WRITEMODE. */
+ smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
+ ATMEL_SMC_MODE_WRITEMODE_NWE;
+
+ return 0;
+}
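
All the conversions above follow one pattern: express the master-clock period
in picoseconds, then round the ONFI timing up to whole cycles. A minimal
condensation of that pattern (helper name hypothetical, mirrors the
mckperiodps computation at the top of the function):

/* Hypothetical helper: convert a picosecond timing into mck cycles. */
static u32 atmel_smc_ps_to_cycles(unsigned long mck_rate_hz, u32 timing_ps)
{
	u32 mckperiodps = NSEC_PER_SEC / mck_rate_hz * 1000;

	return DIV_ROUND_UP(timing_ps, mckperiodps);
}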
+
+static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
+ int csline,
+ const struct nand_data_interface *conf)
+{
+ struct atmel_nand_controller *nc;
+ struct atmel_smc_cs_conf smcconf;
+ struct atmel_nand_cs *cs;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ cs = &nand->cs[csline];
+ cs->smcconf = smcconf;
+ atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
+ int csline,
+ const struct nand_data_interface *conf)
+{
+ struct atmel_nand_controller *nc;
+ struct atmel_smc_cs_conf smcconf;
+ struct atmel_nand_cs *cs;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ cs = &nand->cs[csline];
+ cs->smcconf = smcconf;
+
+ if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
+ cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
+
+ atmel_hsmc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
+
+ return 0;
+}
+
+static int atmel_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ if (csline >= nand->numcs ||
+ (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
+ return -EINVAL;
+
+ return nc->caps->ops->setup_data_interface(nand, csline, conf);
+}
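
Note the two-phase contract this hook implements: the NAND core is expected to
call it once with csline == NAND_DATA_IFACE_CHECK_ONLY to validate a timing
mode, and again with a real CS line to apply it. A hedged sketch of the
core-side sequence (core internals simplified):

/* Sketch of the assumed calling convention. */
ret = chip->setup_data_interface(mtd, NAND_DATA_IFACE_CHECK_ONLY, conf);
if (ret)
	return ret;	/* mode unsupported, fall back to a slower one */
ret = chip->setup_data_interface(mtd, chipnr, conf);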
+
static void atmel_nand_init(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
@@ -1192,6 +1489,9 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
chip->write_buf = atmel_nand_write_buf;
chip->select_chip = atmel_nand_select_chip;
+ if (nc->mck && nc->caps->ops->setup_data_interface)
+ chip->setup_data_interface = atmel_nand_setup_data_interface;
+
/* Some NANDs require a longer delay than the default one (20us). */
chip->chip_delay = 40;
@@ -1677,6 +1977,12 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
if (nc->caps->legacy_of_bindings)
return 0;
+ nc->mck = of_clk_get(dev->parent->of_node, 0);
+ if (IS_ERR(nc->mck)) {
+ dev_err(dev, "Failed to retrieve MCK clk\n");
+ return PTR_ERR(nc->mck);
+ }
+
np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
if (!np) {
dev_err(dev, "Missing or invalid atmel,smc property\n");
@@ -1983,6 +2289,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
.remove = atmel_hsmc_nand_controller_remove,
.ecc_init = atmel_hsmc_nand_ecc_init,
.nand_init = atmel_hsmc_nand_init,
+ .setup_data_interface = atmel_hsmc_nand_setup_data_interface,
};
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
@@ -2037,7 +2344,14 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
return 0;
}
-static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
+/*
+ * The SMC reg layout of at91rm9200 is completely different which prevents us
+ * from re-using atmel_smc_nand_setup_data_interface() for the
+ * ->setup_data_interface() hook.
+ * At this point, there's no support for the at91rm9200 SMC IP, so we leave
+ * ->setup_data_interface() unassigned.
+ */
+static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
.probe = atmel_smc_nand_controller_probe,
.remove = atmel_smc_nand_controller_remove,
.ecc_init = atmel_nand_ecc_init,
@@ -2047,6 +2361,20 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ops = &at91rm9200_nc_ops,
+};
+
+static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
+ .probe = atmel_smc_nand_controller_probe,
+ .remove = atmel_smc_nand_controller_remove,
+ .ecc_init = atmel_nand_ecc_init,
+ .nand_init = atmel_smc_nand_init,
+ .setup_data_interface = atmel_smc_nand_setup_data_interface,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
.ops = &atmel_smc_nc_ops,
};
@@ -2093,7 +2421,7 @@ static const struct of_device_id atmel_nand_controller_of_ids[] = {
},
{
.compatible = "atmel,at91sam9260-nand-controller",
- .data = &atmel_rm9200_nc_caps,
+ .data = &atmel_sam9260_nc_caps,
},
{
.compatible = "atmel,at91sam9261-nand-controller",
@@ -2181,6 +2509,24 @@ static int atmel_nand_controller_remove(struct platform_device *pdev)
return nc->caps->ops->remove(nc);
}
+static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
+{
+ struct atmel_nand_controller *nc = dev_get_drvdata(dev);
+ struct atmel_nand *nand;
+
+ list_for_each_entry(nand, &nc->chips, node) {
+ int i;
+
+ for (i = 0; i < nand->numcs; i++)
+ nand_reset(&nand->base, i);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
+ atmel_nand_controller_resume);
+
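
For the PM ops to take effect, the driver struct below presumably gains one
extra line in its .driver block (assumed wiring, not shown in this hunk):

	.pm = &atmel_nand_controller_pm_ops,	/* assumed addition */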
static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index f1da4ea88f2c..54bac5b73f0a 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -392,6 +392,8 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.onfi_set_features = nand_onfi_get_set_features_notsupp;
+ b47n->nand_chip.onfi_get_features = nand_onfi_get_set_features_notsupp;
nand_chip->chip_delay = 50;
b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
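
Both this hunk and the cafe_nand one below assign a common stub so the core
sees a clean -ENOTSUPP instead of dereferencing a NULL hook. Its body is
presumably just the following (signature per the nand_chip hooks of this era,
stated as an assumption):

int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
				       struct nand_chip *chip, int addr,
				       u8 *subfeature_param)
{
	return -ENOTSUPP;
}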
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index d40c32d311d8..2fd733eba0a3 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -654,6 +654,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.read_buf = cafe_read_buf;
cafe->nand.write_buf = cafe_write_buf;
cafe->nand.select_chip = cafe_select_chip;
+ cafe->nand.onfi_set_features = nand_onfi_get_set_features_notsupp;
+ cafe->nand.onfi_get_features = nand_onfi_get_set_features_notsupp;
cafe->nand.chip_delay = 0;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 531c51991e57..7b26e53b95b1 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -771,11 +771,14 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
info->chip.ecc.bytes = 10;
info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ info->chip.ecc.algo = NAND_ECC_BCH;
} else {
+		/* 1-bit Hamming ECC */
info->chip.ecc.calculate = nand_davinci_calculate_1bit;
info->chip.ecc.correct = nand_davinci_correct_1bit;
info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
info->chip.ecc.bytes = 3;
+ info->chip.ecc.algo = NAND_ECC_HAMMING;
}
info->chip.ecc.size = 512;
info->chip.ecc.strength = pdata->ecc_bits;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 16634df2e39a..d723be352148 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -23,50 +23,43 @@
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "denali.h"
MODULE_LICENSE("GPL");
-/*
- * We define a module parameter that allows the user to override
- * the hardware and decide what timing mode should be used.
- */
-#define NAND_DEFAULT_TIMINGS -1
+#define DENALI_NAND_NAME "denali-nand"
-static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
-module_param(onfi_timing_mode, int, S_IRUGO);
-MODULE_PARM_DESC(onfi_timing_mode,
- "Overrides default ONFI setting. -1 indicates use default timings");
+/* Host Data/Command Interface */
+#define DENALI_HOST_ADDR 0x00
+#define DENALI_HOST_DATA 0x10
-#define DENALI_NAND_NAME "denali-nand"
+#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
+#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
+#define DENALI_MAP10 (2 << 26) /* high-level control plane */
+#define DENALI_MAP11 (3 << 26) /* direct controller access */
-/*
- * We define a macro here that combines all interrupts this driver uses into
- * a single constant value, for convenience.
- */
-#define DENALI_IRQ_ALL (INTR__DMA_CMD_COMP | \
- INTR__ECC_TRANSACTION_DONE | \
- INTR__ECC_ERR | \
- INTR__PROGRAM_FAIL | \
- INTR__LOAD_COMP | \
- INTR__PROGRAM_COMP | \
- INTR__TIME_OUT | \
- INTR__ERASE_FAIL | \
- INTR__RST_COMP | \
- INTR__ERASE_COMP)
+/* MAP11 access cycle type */
+#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
+#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
+#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
-/*
- * indicates whether or not the internal value for the flash bank is
- * valid or not
- */
-#define CHIP_SELECT_INVALID -1
+/* MAP10 commands */
+#define DENALI_ERASE 0x01
+
+#define DENALI_BANK(denali) ((denali)->active_bank << 24)
+
+#define DENALI_INVALID_BANK -1
+#define DENALI_NR_BANKS 4
/*
- * This macro divides two integers and rounds fractional values up
- * to the nearest integer value.
+ * The bus interface clock, clk_x, is phase aligned with the core clock. The
+ * clk_x is an integral multiple N of the core clk. The value N is configured
+ * at IP delivery time, and its possible values are 4, 5, or 6. We align to
+ * the largest value so the result works with any possible configuration.
*/
-#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
+#define DENALI_CLK_X_MULT 6
/*
* this macro allows us to convert from an MTD structure to our own
@@ -77,339 +70,11 @@ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
-/*
- * These constants are defined by the driver to enable common driver
- * configuration options.
- */
-#define SPARE_ACCESS 0x41
-#define MAIN_ACCESS 0x42
-#define MAIN_SPARE_ACCESS 0x43
-
-#define DENALI_READ 0
-#define DENALI_WRITE 0x100
-
-/*
- * this is a helper macro that allows us to
- * format the bank into the proper bits for the controller
- */
-#define BANK(x) ((x) << 24)
-
-/* forward declarations */
-static void clear_interrupts(struct denali_nand_info *denali);
-static uint32_t wait_for_irq(struct denali_nand_info *denali,
- uint32_t irq_mask);
-static void denali_irq_enable(struct denali_nand_info *denali,
- uint32_t int_mask);
-static uint32_t read_interrupt_status(struct denali_nand_info *denali);
-
-/*
- * Certain operations for the denali NAND controller use an indexed mode to
- * read/write data. The operation is performed by writing the address value
- * of the command to the device memory followed by the data. This function
- * abstracts this common operation.
- */
-static void index_addr(struct denali_nand_info *denali,
- uint32_t address, uint32_t data)
-{
- iowrite32(address, denali->flash_mem);
- iowrite32(data, denali->flash_mem + 0x10);
-}
-
-/* Perform an indexed read of the device */
-static void index_addr_read_data(struct denali_nand_info *denali,
- uint32_t address, uint32_t *pdata)
-{
- iowrite32(address, denali->flash_mem);
- *pdata = ioread32(denali->flash_mem + 0x10);
-}
-
-/*
- * We need to buffer some data for some of the NAND core routines.
- * The operations manage buffering that data.
- */
-static void reset_buf(struct denali_nand_info *denali)
-{
- denali->buf.head = denali->buf.tail = 0;
-}
-
-static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
-{
- denali->buf.buf[denali->buf.tail++] = byte;
-}
-
-/* reads the status of the device */
-static void read_status(struct denali_nand_info *denali)
-{
- uint32_t cmd;
-
- /* initialize the data buffer to store status */
- reset_buf(denali);
-
- cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
- if (cmd)
- write_byte_to_buf(denali, NAND_STATUS_WP);
- else
- write_byte_to_buf(denali, 0);
-}
-
-/* resets a specific device connected to the core */
-static void reset_bank(struct denali_nand_info *denali)
-{
- uint32_t irq_status;
- uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT;
-
- clear_interrupts(denali);
-
- iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
-
- irq_status = wait_for_irq(denali, irq_mask);
-
- if (irq_status & INTR__TIME_OUT)
- dev_err(denali->dev, "reset bank failed.\n");
-}
-
-/* Reset the flash controller */
-static uint16_t denali_nand_reset(struct denali_nand_info *denali)
-{
- int i;
-
- for (i = 0; i < denali->max_banks; i++)
- iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
- denali->flash_reg + INTR_STATUS(i));
-
- for (i = 0; i < denali->max_banks; i++) {
- iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
- while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
- (INTR__RST_COMP | INTR__TIME_OUT)))
- cpu_relax();
- if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
- INTR__TIME_OUT)
- dev_dbg(denali->dev,
- "NAND Reset operation timed out on bank %d\n", i);
- }
-
- for (i = 0; i < denali->max_banks; i++)
- iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
- denali->flash_reg + INTR_STATUS(i));
-
- return PASS;
-}
-
-/*
- * this routine calculates the ONFI timing values for a given mode and
- * programs the clocking register accordingly. The mode is determined by
- * the get_onfi_nand_para routine.
- */
-static void nand_onfi_timing_set(struct denali_nand_info *denali,
- uint16_t mode)
-{
- uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
- uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
- uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
- uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
- uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
- uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
- uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
- uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
- uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
- uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
- uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
- uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
-
- uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
- uint16_t dv_window = 0;
- uint16_t en_lo, en_hi;
- uint16_t acc_clks;
- uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
-
- en_lo = CEIL_DIV(Trp[mode], CLK_X);
- en_hi = CEIL_DIV(Treh[mode], CLK_X);
-#if ONFI_BLOOM_TIME
- if ((en_hi * CLK_X) < (Treh[mode] + 2))
- en_hi++;
-#endif
-
- if ((en_lo + en_hi) * CLK_X < Trc[mode])
- en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
-
- if ((en_lo + en_hi) < CLK_MULTI)
- en_lo += CLK_MULTI - en_lo - en_hi;
-
- while (dv_window < 8) {
- data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
-
- data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
-
- data_invalid = data_invalid_rhoh < data_invalid_rloh ?
- data_invalid_rhoh : data_invalid_rloh;
-
- dv_window = data_invalid - Trea[mode];
-
- if (dv_window < 8)
- en_lo++;
- }
-
- acc_clks = CEIL_DIV(Trea[mode], CLK_X);
-
- while (acc_clks * CLK_X - Trea[mode] < 3)
- acc_clks++;
-
- if (data_invalid - acc_clks * CLK_X < 2)
- dev_warn(denali->dev, "%s, Line %d: Warning!\n",
- __FILE__, __LINE__);
-
- addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
- re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
- re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
- we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
- cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
- if (cs_cnt == 0)
- cs_cnt = 1;
-
- if (Tcea[mode]) {
- while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
- cs_cnt++;
- }
-
-#if MODE5_WORKAROUND
- if (mode == 5)
- acc_clks = 5;
-#endif
-
- /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
- if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
- ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
- acc_clks = 6;
-
- iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
- iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
- iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
- iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
- iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
- iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
- iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
- iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
-}
-
-/* queries the NAND device to see what ONFI modes it supports. */
-static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+static void denali_host_write(struct denali_nand_info *denali,
+ uint32_t addr, uint32_t data)
{
- int i;
-
- /*
- * we needn't to do a reset here because driver has already
- * reset all the banks before
- */
- if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
- ONFI_TIMING_MODE__VALUE))
- return FAIL;
-
- for (i = 5; i > 0; i--) {
- if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
- (0x01 << i))
- break;
- }
-
- nand_onfi_timing_set(denali, i);
-
- /*
- * By now, all the ONFI devices we know support the page cache
- * rw feature. So here we enable the pipeline_rw_ahead feature
- */
- /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
- /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
-
- return PASS;
-}
-
-static void get_samsung_nand_para(struct denali_nand_info *denali,
- uint8_t device_id)
-{
- if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
- /* Set timing register values according to datasheet */
- iowrite32(5, denali->flash_reg + ACC_CLKS);
- iowrite32(20, denali->flash_reg + RE_2_WE);
- iowrite32(12, denali->flash_reg + WE_2_RE);
- iowrite32(14, denali->flash_reg + ADDR_2_DATA);
- iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
- iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
- iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
- }
-}
-
-static void get_toshiba_nand_para(struct denali_nand_info *denali)
-{
- /*
- * Workaround to fix a controller bug which reports a wrong
- * spare area size for some kind of Toshiba NAND device
- */
- if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
- (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64))
- iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
-}
-
-static void get_hynix_nand_para(struct denali_nand_info *denali,
- uint8_t device_id)
-{
- switch (device_id) {
- case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
- case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
- iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
- iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
- iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
- iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
- break;
- default:
- dev_warn(denali->dev,
- "Unknown Hynix NAND (Device ID: 0x%x).\n"
- "Will use default parameter values instead.\n",
- device_id);
- }
-}
-
-/*
- * determines how many NAND chips are connected to the controller. Note for
- * Intel CE4100 devices we don't support more than one device.
- */
-static void find_valid_banks(struct denali_nand_info *denali)
-{
- uint32_t id[denali->max_banks];
- int i;
-
- denali->total_used_banks = 1;
- for (i = 0; i < denali->max_banks; i++) {
- index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
- index_addr(denali, MODE_11 | (i << 24) | 1, 0);
- index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
-
- dev_dbg(denali->dev,
- "Return 1st ID for bank[%d]: %x\n", i, id[i]);
-
- if (i == 0) {
- if (!(id[i] & 0x0ff))
- break; /* WTF? */
- } else {
- if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
- denali->total_used_banks++;
- else
- break;
- }
- }
-
- if (denali->platform == INTEL_CE4100) {
- /*
- * Platform limitations of the CE4100 device limit
- * users to a single chip solution for NAND.
- * Multichip support is not enabled.
- */
- if (denali->total_used_banks != 1) {
- dev_err(denali->dev,
- "Sorry, Intel CE4100 only supports a single NAND device.\n");
- BUG();
- }
- }
- dev_dbg(denali->dev,
- "denali->total_used_banks: %d\n", denali->total_used_banks);
+ iowrite32(addr, denali->host + DENALI_HOST_ADDR);
+ iowrite32(data, denali->host + DENALI_HOST_DATA);
}
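+
+/*
+ * The host interface is indexed: each access first writes the target
+ * address to DENALI_HOST_ADDR, then moves the payload through
+ * DENALI_HOST_DATA.  All of the MAP01/MAP10/MAP11 commands used below
+ * are issued through this register pair.
+ */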
/*
@@ -418,7 +83,7 @@ static void find_valid_banks(struct denali_nand_info *denali)
*/
static void detect_max_banks(struct denali_nand_info *denali)
{
- uint32_t features = ioread32(denali->flash_reg + FEATURES);
+ uint32_t features = ioread32(denali->reg + FEATURES);
denali->max_banks = 1 << (features & FEATURES__N_BANKS);
@@ -427,227 +92,120 @@ static void detect_max_banks(struct denali_nand_info *denali)
denali->max_banks <<= 1;
}
-static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
+static void denali_enable_irq(struct denali_nand_info *denali)
{
- uint16_t status = PASS;
- uint32_t id_bytes[8], addr;
- uint8_t maf_id, device_id;
int i;
-	/*
-	 * Use the READ ID method to get the device ID and other params.
-	 * For some NAND chips, the controller can't report the correct
-	 * device ID by reading from the DEVICE_ID register.
-	 */
- addr = MODE_11 | BANK(denali->flash_bank);
- index_addr(denali, addr | 0, 0x90);
- index_addr(denali, addr | 1, 0);
- for (i = 0; i < 8; i++)
- index_addr_read_data(denali, addr | 2, &id_bytes[i]);
- maf_id = id_bytes[0];
- device_id = id_bytes[1];
-
- if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
- if (FAIL == get_onfi_nand_para(denali))
- return FAIL;
- } else if (maf_id == 0xEC) { /* Samsung NAND */
- get_samsung_nand_para(denali, device_id);
- } else if (maf_id == 0x98) { /* Toshiba NAND */
- get_toshiba_nand_para(denali);
- } else if (maf_id == 0xAD) { /* Hynix NAND */
- get_hynix_nand_para(denali, device_id);
- }
-
- dev_info(denali->dev,
- "Dump timing register values:\n"
- "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
- "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(denali->flash_reg + ACC_CLKS),
- ioread32(denali->flash_reg + RE_2_WE),
- ioread32(denali->flash_reg + RE_2_RE),
- ioread32(denali->flash_reg + WE_2_RE),
- ioread32(denali->flash_reg + ADDR_2_DATA),
- ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
- ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
- ioread32(denali->flash_reg + CS_SETUP_CNT));
-
- find_valid_banks(denali);
-
- /*
- * If the user specified to override the default timings
- * with a specific ONFI mode, we apply those changes here.
- */
- if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
- nand_onfi_timing_set(denali, onfi_timing_mode);
-
- return status;
+ for (i = 0; i < DENALI_NR_BANKS; i++)
+ iowrite32(U32_MAX, denali->reg + INTR_EN(i));
+ iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}
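+
+/*
+ * Interrupt delivery is gated at two levels: per bank via INTR_EN(i)
+ * and globally via GLOBAL_INT_ENABLE.  The enable/disable helpers
+ * always flip both levels together.
+ */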
-static void denali_set_intr_modes(struct denali_nand_info *denali,
- uint16_t INT_ENABLE)
+static void denali_disable_irq(struct denali_nand_info *denali)
{
- if (INT_ENABLE)
- iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
- else
- iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
-}
-
-/*
- * validation function to verify that the controlling software is making
- * a valid request
- */
-static inline bool is_flash_bank_valid(int flash_bank)
-{
- return flash_bank >= 0 && flash_bank < 4;
-}
-
-static void denali_irq_init(struct denali_nand_info *denali)
-{
- uint32_t int_mask;
int i;
- /* Disable global interrupts */
- denali_set_intr_modes(denali, false);
-
- int_mask = DENALI_IRQ_ALL;
-
- /* Clear all status bits */
- for (i = 0; i < denali->max_banks; ++i)
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
-
- denali_irq_enable(denali, int_mask);
+ for (i = 0; i < DENALI_NR_BANKS; i++)
+ iowrite32(0, denali->reg + INTR_EN(i));
+ iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}
-static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+static void denali_clear_irq(struct denali_nand_info *denali,
+ int bank, uint32_t irq_status)
{
- denali_set_intr_modes(denali, false);
+ /* write one to clear bits */
+ iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
-static void denali_irq_enable(struct denali_nand_info *denali,
- uint32_t int_mask)
+static void denali_clear_irq_all(struct denali_nand_info *denali)
{
int i;
- for (i = 0; i < denali->max_banks; ++i)
- iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
+ for (i = 0; i < DENALI_NR_BANKS; i++)
+ denali_clear_irq(denali, i, U32_MAX);
}
-/*
- * This function only returns when an interrupt that this driver cares about
- * occurs. This is to reduce the overhead of servicing interrupts
- */
-static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+static irqreturn_t denali_isr(int irq, void *dev_id)
{
- return read_interrupt_status(denali) & DENALI_IRQ_ALL;
-}
+ struct denali_nand_info *denali = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ uint32_t irq_status;
+ int i;
-/* Interrupts are cleared by writing a 1 to the appropriate status bit */
-static inline void clear_interrupt(struct denali_nand_info *denali,
- uint32_t irq_mask)
-{
- uint32_t intr_status_reg;
+ spin_lock(&denali->irq_lock);
- intr_status_reg = INTR_STATUS(denali->flash_bank);
+ for (i = 0; i < DENALI_NR_BANKS; i++) {
+ irq_status = ioread32(denali->reg + INTR_STATUS(i));
+ if (irq_status)
+ ret = IRQ_HANDLED;
- iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
-}
+ denali_clear_irq(denali, i, irq_status);
-static void clear_interrupts(struct denali_nand_info *denali)
-{
- uint32_t status;
+ if (i != denali->active_bank)
+ continue;
- spin_lock_irq(&denali->irq_lock);
+ denali->irq_status |= irq_status;
- status = read_interrupt_status(denali);
- clear_interrupt(denali, status);
+ if (denali->irq_status & denali->irq_mask)
+ complete(&denali->complete);
+ }
+
+ spin_unlock(&denali->irq_lock);
- denali->irq_status = 0x0;
- spin_unlock_irq(&denali->irq_lock);
+ return ret;
}
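+
+/*
+ * The ISR acknowledges every bank, but only events from the currently
+ * selected bank are accumulated in denali->irq_status; a waiter
+ * sleeping in denali_wait_for_irq() is completed as soon as one of the
+ * bits it registered in denali->irq_mask shows up.
+ */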
-static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+static void denali_reset_irq(struct denali_nand_info *denali)
{
- uint32_t intr_status_reg;
-
- intr_status_reg = INTR_STATUS(denali->flash_bank);
+ unsigned long flags;
- return ioread32(denali->flash_reg + intr_status_reg);
+ spin_lock_irqsave(&denali->irq_lock, flags);
+ denali->irq_status = 0;
+ denali->irq_mask = 0;
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
}
-/*
- * This is the interrupt service routine. It handles all interrupts
- * sent to this device. Note that on CE4100, this is a shared interrupt.
- */
-static irqreturn_t denali_isr(int irq, void *dev_id)
+static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
+ uint32_t irq_mask)
{
- struct denali_nand_info *denali = dev_id;
+ unsigned long time_left, flags;
uint32_t irq_status;
- irqreturn_t result = IRQ_NONE;
- spin_lock(&denali->irq_lock);
+ spin_lock_irqsave(&denali->irq_lock, flags);
- /* check to see if a valid NAND chip has been selected. */
- if (is_flash_bank_valid(denali->flash_bank)) {
- /*
- * check to see if controller generated the interrupt,
- * since this is a shared interrupt
- */
- irq_status = denali_irq_detected(denali);
- if (irq_status != 0) {
- /* handle interrupt */
- /* first acknowledge it */
- clear_interrupt(denali, irq_status);
- /*
- * store the status in the device context for someone
- * to read
- */
- denali->irq_status |= irq_status;
- /* notify anyone who cares that it happened */
- complete(&denali->complete);
- /* tell the OS that we've handled this */
- result = IRQ_HANDLED;
- }
+ irq_status = denali->irq_status;
+
+ if (irq_mask & irq_status) {
+ /* return immediately if the IRQ has already happened. */
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+ return irq_status;
}
- spin_unlock(&denali->irq_lock);
- return result;
-}
-static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
-{
- unsigned long comp_res;
- uint32_t intr_status;
- unsigned long timeout = msecs_to_jiffies(1000);
+ denali->irq_mask = irq_mask;
+ reinit_completion(&denali->complete);
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
- do {
- comp_res =
- wait_for_completion_timeout(&denali->complete, timeout);
- spin_lock_irq(&denali->irq_lock);
- intr_status = denali->irq_status;
-
- if (intr_status & irq_mask) {
- denali->irq_status &= ~irq_mask;
- spin_unlock_irq(&denali->irq_lock);
- /* our interrupt was detected */
- break;
- }
+ time_left = wait_for_completion_timeout(&denali->complete,
+ msecs_to_jiffies(1000));
+ if (!time_left) {
+ dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
+ denali->irq_mask);
+ return 0;
+ }
- /*
- * these are not the interrupts you are looking for -
- * need to wait again
- */
- spin_unlock_irq(&denali->irq_lock);
- } while (comp_res != 0);
+ return denali->irq_status;
+}
+
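+/*
+ * Typical usage of the reset/wait pair (cf. denali_erase() below):
+ *
+ *	denali_reset_irq(denali);
+ *	<start the operation>
+ *	irq_status = denali_wait_for_irq(denali,
+ *					 INTR__ERASE_COMP | INTR__ERASE_FAIL);
+ *	if (!(irq_status & INTR__ERASE_COMP))
+ *		<handle failure or timeout>
+ */
+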
+static uint32_t denali_check_irq(struct denali_nand_info *denali)
+{
+ unsigned long flags;
+ uint32_t irq_status;
- if (comp_res == 0) {
- /* timeout */
- pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
- intr_status, irq_mask);
+ spin_lock_irqsave(&denali->irq_lock, flags);
+ irq_status = denali->irq_status;
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
- intr_status = 0;
- }
- return intr_status;
+ return irq_status;
}
/*
@@ -664,153 +222,111 @@ static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
/* Enable spare area/ECC per user's request. */
- iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
- iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+ iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
+ iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
}
-/*
- * sends a pipeline command operation to the controller. See the Denali NAND
- * controller's user guide for more information (section 4.2.3.6).
- */
-static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
- bool ecc_en, bool transfer_spare,
- int access_type, int op)
+static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int status = PASS;
- uint32_t addr, cmd;
-
- setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int i;
- clear_interrupts(denali);
+ iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
+ denali->host + DENALI_HOST_ADDR);
- addr = BANK(denali->flash_bank) | denali->page;
+ for (i = 0; i < len; i++)
+ buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+}
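+
+/*
+ * In MAP11 data mode, each 32-bit access to DENALI_HOST_DATA carries a
+ * single bus cycle: one byte on an 8-bit device, one 16-bit word on a
+ * 16-bit device (see the buf16 variants below).
+ */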
- if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
- cmd = MODE_01 | addr;
- iowrite32(cmd, denali->flash_mem);
- } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
- /* read spare area */
- cmd = MODE_10 | addr;
- index_addr(denali, cmd, access_type);
+static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int i;
- cmd = MODE_01 | addr;
- iowrite32(cmd, denali->flash_mem);
- } else if (op == DENALI_READ) {
- /* setup page read request for access type */
- cmd = MODE_10 | addr;
- index_addr(denali, cmd, access_type);
+ iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
+ denali->host + DENALI_HOST_ADDR);
- cmd = MODE_01 | addr;
- iowrite32(cmd, denali->flash_mem);
- }
- return status;
+ for (i = 0; i < len; i++)
+ iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
}
-/* helper function that simply writes a buffer to the flash */
-static int write_data_to_flash_mem(struct denali_nand_info *denali,
- const uint8_t *buf, int len)
+static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
- uint32_t *buf32;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint16_t *buf16 = (uint16_t *)buf;
int i;
- /*
- * verify that the len is a multiple of 4.
- * see comment in read_data_from_flash_mem()
- */
- BUG_ON((len % 4) != 0);
+ iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
+ denali->host + DENALI_HOST_ADDR);
- /* write the data to the flash memory */
- buf32 = (uint32_t *)buf;
- for (i = 0; i < len / 4; i++)
- iowrite32(*buf32++, denali->flash_mem + 0x10);
-	return i * 4; /* intent is to return the number of bytes written */
+ for (i = 0; i < len / 2; i++)
+ buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
}
-/* helper function that simply reads a buffer from the flash */
-static int read_data_from_flash_mem(struct denali_nand_info *denali,
- uint8_t *buf, int len)
+static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
{
- uint32_t *buf32;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ const uint16_t *buf16 = (const uint16_t *)buf;
int i;
-	/*
-	 * We assume len is a multiple of 4; if not, it would be nice to
-	 * know about it ASAP rather than have random failures...
-	 * This assumption is based on the fact that this function is
-	 * designed to read flash pages, which are typically multiples of 4.
-	 */
- BUG_ON((len % 4) != 0);
+ iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
+ denali->host + DENALI_HOST_ADDR);
- /* transfer the data from the flash */
- buf32 = (uint32_t *)buf;
- for (i = 0; i < len / 4; i++)
- *buf32++ = ioread32(denali->flash_mem + 0x10);
- return i * 4; /* intent is to return the number of bytes read */
+ for (i = 0; i < len / 2; i++)
+ iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
}
-/* writes OOB data to the device */
-static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+static uint8_t denali_read_byte(struct mtd_info *mtd)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t irq_status;
- uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
- int status = 0;
+ uint8_t byte;
- denali->page = page;
+ denali_read_buf(mtd, &byte, 1);
- if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
- DENALI_WRITE) == PASS) {
- write_data_to_flash_mem(denali, buf, mtd->oobsize);
+ return byte;
+}
- /* wait for operation to complete */
- irq_status = wait_for_irq(denali, irq_mask);
+static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ denali_write_buf(mtd, &byte, 1);
+}
- if (irq_status == 0) {
- dev_err(denali->dev, "OOB write failed\n");
- status = -EIO;
- }
- } else {
- dev_err(denali->dev, "unable to send pipeline command\n");
- status = -EIO;
- }
- return status;
+static uint16_t denali_read_word(struct mtd_info *mtd)
+{
+ uint16_t word;
+
+ denali_read_buf16(mtd, (uint8_t *)&word, 2);
+
+ return word;
}
-/* reads OOB data from the device */
-static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t irq_mask = INTR__LOAD_COMP;
- uint32_t irq_status, addr, cmd;
+ uint32_t type;
- denali->page = page;
+ if (ctrl & NAND_CLE)
+ type = DENALI_MAP11_CMD;
+ else if (ctrl & NAND_ALE)
+ type = DENALI_MAP11_ADDR;
+ else
+ return;
- if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
- DENALI_READ) == PASS) {
- read_data_from_flash_mem(denali, buf, mtd->oobsize);
+ /*
+ * Some commands are followed by chip->dev_ready or chip->waitfunc.
+ * irq_status must be cleared here to catch the R/B# interrupt later.
+ */
+ if (ctrl & NAND_CTRL_CHANGE)
+ denali_reset_irq(denali);
-		/*
-		 * Wait for the command to be accepted. We can always use
-		 * the status0 bit, as the mask is identical for each bank.
-		 */
- irq_status = wait_for_irq(denali, irq_mask);
+ denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+}
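+
+/*
+ * denali_cmd_ctrl() lets the NAND core's default command logic drive
+ * the bus directly: a CLE cycle becomes a MAP11 command write and an
+ * ALE cycle a MAP11 address write, while data cycles go through the
+ * read_buf/write_buf hooks above.
+ */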
- if (irq_status == 0)
- dev_err(denali->dev, "page on OOB timeout %d\n",
- denali->page);
+static int denali_dev_ready(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
- /*
- * We set the device back to MAIN_ACCESS here as I observed
- * instability with the controller if you do a block erase
- * and the last transaction was a SPARE_ACCESS. Block erase
- * is reliable (according to the MTD test infrastructure)
- * if you are in MAIN_ACCESS.
- */
- addr = BANK(denali->flash_bank) | denali->page;
- cmd = MODE_10 | addr;
- index_addr(denali, cmd, MAIN_ACCESS);
- }
+ return !!(denali_check_irq(denali) & INTR__INT_ACT);
}
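+
+/*
+ * INTR__INT_ACT is raised on a low-to-high transition of the R/B# pin,
+ * so seeing it after denali_reset_irq() means the device has returned
+ * to the ready state.
+ */
+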
static int denali_check_erased_page(struct mtd_info *mtd,
@@ -856,11 +372,11 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
unsigned long *uncor_ecc_flags)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- int bank = denali->flash_bank;
+ int bank = denali->active_bank;
uint32_t ecc_cor;
unsigned int max_bitflips;
- ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
+ ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
@@ -886,8 +402,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-#define ECC_SECTOR_SIZE 512
-
#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
@@ -899,22 +413,23 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
{
+ unsigned int ecc_size = denali->nand.ecc.size;
unsigned int bitflips = 0;
unsigned int max_bitflips = 0;
uint32_t err_addr, err_cor_info;
unsigned int err_byte, err_sector, err_device;
uint8_t err_cor_value;
unsigned int prev_sector = 0;
+ uint32_t irq_status;
- /* read the ECC errors. we'll ignore them for now */
- denali_set_intr_modes(denali, false);
+ denali_reset_irq(denali);
do {
- err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
+ err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
err_sector = ECC_SECTOR(err_addr);
err_byte = ECC_BYTE(err_addr);
- err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
+ err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
err_device = ECC_ERR_DEVICE(err_cor_info);
@@ -928,9 +443,9 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
* an erased sector.
*/
*uncor_ecc_flags |= BIT(err_sector);
- } else if (err_byte < ECC_SECTOR_SIZE) {
+ } else if (err_byte < ecc_size) {
/*
- * If err_byte is larger than ECC_SECTOR_SIZE, means error
+		 * If err_byte is larger than ecc_size, it means the error
		 * happened in the OOB area, so we ignore it; there is no
		 * need to correct it. err_device identifies which NAND
		 * device the error bits occurred in if there are more than
@@ -939,8 +454,8 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
int offset;
unsigned int flips_in_byte;
- offset = (err_sector * ECC_SECTOR_SIZE + err_byte) *
- denali->devnum + err_device;
+ offset = (err_sector * ecc_size + err_byte) *
+ denali->devs_per_cs + err_device;
/* correct the ECC error */
flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
@@ -959,10 +474,9 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
	 * ECC_TRANSACTION_DONE interrupt, so just wait here
	 * for that interrupt for a while.
*/
- while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE))
- cpu_relax();
- clear_interrupts(denali);
- denali_set_intr_modes(denali, true);
+ irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
+ if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
+ return -EIO;
return max_bitflips;
}
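+
+/*
+ * Example for the offset calculation above, assuming ecc_size = 512 and
+ * devs_per_cs = 2 (two x8 chips behind one chip select): an error at
+ * err_sector = 1, err_byte = 3 on err_device = 1 maps to byte
+ * (1 * 512 + 3) * 2 + 1 = 1031 of the interleaved page buffer.
+ */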
@@ -970,17 +484,17 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
/* program the controller to enable or disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
- iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
- ioread32(denali->flash_reg + DMA_ENABLE);
+ iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
+ ioread32(denali->reg + DMA_ENABLE);
}
-static void denali_setup_dma64(struct denali_nand_info *denali, int op)
+static void denali_setup_dma64(struct denali_nand_info *denali,
+ dma_addr_t dma_addr, int page, int write)
{
uint32_t mode;
const int page_count = 1;
- uint64_t addr = denali->buf.dma_buf;
- mode = MODE_10 | BANK(denali->flash_bank) | denali->page;
+ mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
/* DMA is a three step process */
@@ -988,191 +502,354 @@ static void denali_setup_dma64(struct denali_nand_info *denali, int op)
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
- index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);
+ denali_host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
- index_addr(denali, mode, addr);
+ denali_host_write(denali, mode, dma_addr);
/* 3. set memory high address */
- index_addr(denali, mode, addr >> 32);
+ denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
}
-static void denali_setup_dma32(struct denali_nand_info *denali, int op)
+static void denali_setup_dma32(struct denali_nand_info *denali,
+ dma_addr_t dma_addr, int page, int write)
{
uint32_t mode;
const int page_count = 1;
- uint32_t addr = denali->buf.dma_buf;
- mode = MODE_10 | BANK(denali->flash_bank);
+ mode = DENALI_MAP10 | DENALI_BANK(denali);
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
- index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+ denali_host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
- index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
+ denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
- index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
+ denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
- index_addr(denali, mode | 0x14000, 0x2400);
+ denali_host_write(denali, mode | 0x14000, 0x2400);
}
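+
+/*
+ * In both variants, the transfer direction is encoded in bit 8 of the
+ * MAP10 transfer-type word (hence "write << 8"), and page_count is
+ * fixed at 1 because the driver always transfers a single page per DMA
+ * command.
+ */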
-static void denali_setup_dma(struct denali_nand_info *denali, int op)
+static void denali_setup_dma(struct denali_nand_info *denali,
+ dma_addr_t dma_addr, int page, int write)
{
if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali_setup_dma64(denali, op);
+ denali_setup_dma64(denali, dma_addr, page, write);
else
- denali_setup_dma32(denali, op);
+ denali_setup_dma32(denali, dma_addr, page, write);
}
-/*
- * writes a page. user specifies type, and this function handles the
- * configuration details.
- */
-static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, bool raw_xfer)
+static int denali_pio_read(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- dma_addr_t addr = denali->buf.dma_buf;
- size_t size = mtd->writesize + mtd->oobsize;
+ uint32_t addr = DENALI_BANK(denali) | page;
+ uint32_t *buf32 = (uint32_t *)buf;
+ uint32_t irq_status, ecc_err_mask;
+ int i;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ else
+ ecc_err_mask = INTR__ECC_ERR;
+
+ denali_reset_irq(denali);
+
+ iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
+ for (i = 0; i < size / 4; i++)
+ *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+
+ irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
+ if (!(irq_status & INTR__PAGE_XFER_INC))
+ return -EIO;
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return irq_status & ecc_err_mask ? -EBADMSG : 0;
+}
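+
+/*
+ * When the controller flags INTR__ERASED_PAGE, the page was detected
+ * as erased, so the buffer is normalized to all-0xff before returning.
+ */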
+
+static int denali_pio_write(struct denali_nand_info *denali,
+ const void *buf, size_t size, int page, int raw)
+{
+ uint32_t addr = DENALI_BANK(denali) | page;
+ const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
- uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
+ int i;
- /*
- * if it is a raw xfer, we want to disable ecc and send the spare area.
- * !raw_xfer - enable ecc
- * raw_xfer - transfer spare
- */
- setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+ denali_reset_irq(denali);
- /* copy buffer into DMA buffer */
- memcpy(denali->buf.buf, buf, mtd->writesize);
+ iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
+ for (i = 0; i < size / 4; i++)
+ iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
- if (raw_xfer) {
- /* transfer the data to the spare area */
- memcpy(denali->buf.buf + mtd->writesize,
- chip->oob_poi,
- mtd->oobsize);
+ irq_status = denali_wait_for_irq(denali,
+ INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
+ if (!(irq_status & INTR__PROGRAM_COMP))
+ return -EIO;
+
+ return 0;
+}
+
+static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw, int write)
+{
+ if (write)
+ return denali_pio_write(denali, buf, size, page, raw);
+ else
+ return denali_pio_read(denali, buf, size, page, raw);
+}
+
+static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw, int write)
+{
+ dma_addr_t dma_addr;
+ uint32_t irq_mask, irq_status, ecc_err_mask;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ dma_addr = dma_map_single(denali->dev, buf, size, dir);
+ if (dma_mapping_error(denali->dev, dma_addr)) {
+ dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
+ return denali_pio_xfer(denali, buf, size, page, raw, write);
}
- dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
+ if (write) {
+ /*
+ * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
+ * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
+ * when the page program is completed.
+ */
+ irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
+ ecc_err_mask = 0;
+ } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ } else {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_ERR;
+ }
- clear_interrupts(denali);
denali_enable_dma(denali, true);
- denali_setup_dma(denali, DENALI_WRITE);
+ denali_reset_irq(denali);
+ denali_setup_dma(denali, dma_addr, page, write);
/* wait for operation to complete */
- irq_status = wait_for_irq(denali, irq_mask);
-
- if (irq_status == 0) {
- dev_err(denali->dev, "timeout on write_page (type = %d)\n",
- raw_xfer);
- denali->status = NAND_STATUS_FAIL;
- }
+ irq_status = denali_wait_for_irq(denali, irq_mask);
+ if (!(irq_status & INTR__DMA_CMD_COMP))
+ ret = -EIO;
+ else if (irq_status & ecc_err_mask)
+ ret = -EBADMSG;
denali_enable_dma(denali, false);
- dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+ dma_unmap_single(denali->dev, dma_addr, size, dir);
- return 0;
-}
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
-/* NAND core entry points */
+ return ret;
+}
-/*
- * this is the callback that the NAND core calls to write a page. Since
- * writing a page with ECC or without is similar, all the work is done
- * by write_page above.
- */
-static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw, int write)
{
- /*
- * for regular page writes, we let HW handle all the ECC
- * data written to the device.
- */
- return write_page(mtd, chip, buf, false);
+ setup_ecc_for_xfer(denali, !raw, raw);
+
+ if (denali->dma_avail)
+ return denali_dma_xfer(denali, buf, size, page, raw, write);
+ else
+ return denali_pio_xfer(denali, buf, size, page, raw, write);
}
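+
+/*
+ * denali_data_xfer() is the single entry point for full-page transfers:
+ * raw accesses bypass ECC but move the spare area as well, and DMA is
+ * used when available, with PIO as the fallback.  -EIO means the
+ * transfer itself failed; -EBADMSG means it completed but the ECC
+ * engine flagged errors for the caller to handle.
+ */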
-/*
- * This is the callback that the NAND core calls to write a page without ECC.
- * raw access is similar to ECC page writes, so all the work is done in the
- * write_page() function above.
- */
-static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int write)
{
- /*
- * for raw page writes, we want to disable ECC and simply write
- * whatever data is in the buffer.
- */
- return write_page(mtd, chip, buf, true);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
+ unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ uint8_t *bufpoi = chip->oob_poi;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int oob_skip = denali->oob_skip_bytes;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+
+ /* BBM at the beginning of the OOB area */
+ chip->cmdfunc(mtd, start_cmd, writesize, page);
+ if (write)
+ chip->write_buf(mtd, bufpoi, oob_skip);
+ else
+ chip->read_buf(mtd, bufpoi, oob_skip);
+ bufpoi += oob_skip;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ chip->cmdfunc(mtd, rnd_cmd, pos, -1);
+ if (write)
+ chip->write_buf(mtd, bufpoi, len);
+ else
+ chip->read_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
+ if (write)
+ chip->write_buf(mtd, bufpoi, len);
+ else
+ chip->read_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ }
+ }
+
+ /* OOB free */
+ len = oobsize - (bufpoi - chip->oob_poi);
+ chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
+ if (write)
+ chip->write_buf(mtd, bufpoi, len);
+ else
+ chip->read_buf(mtd, bufpoi, len);
}
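+
+/*
+ * Layout example for the walks above and below, assuming a 2048+64 byte
+ * page, oob_skip_bytes = 2 and a 512-byte/14-byte ECC step: the raw
+ * stream is data0|ecc0|...|data3|ecc3 (4 * 526 = 2104 bytes).  data3
+ * starts at offset 1578 and crosses the 2048-byte main-area boundary,
+ * so its last 42 bytes continue at offset 2050, right behind the 2-byte
+ * BBM; ecc3 follows at 2092 and the last 6 bytes of the OOB are free.
+ */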
-static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
- return write_oob_data(mtd, chip->oob_poi, page);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *dma_buf = denali->buf;
+ int oob_skip = denali->oob_skip_bytes;
+ size_t size = writesize + oobsize;
+ int ret, i, pos, len;
+
+ ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
+ if (ret)
+ return ret;
+
+ /* Arrange the buffer for syndrome payload/ecc layout */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, dma_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, dma_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ uint8_t *oob = chip->oob_poi;
+
+ /* BBM at the beginning of the OOB area */
+ memcpy(oob, dma_buf + writesize, oob_skip);
+ oob += oob_skip;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, dma_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, dma_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+
+ /* OOB free */
+ len = oobsize - (oob - chip->oob_poi);
+ memcpy(oob, dma_buf + size - len, len);
+ }
+
+ return 0;
}
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- read_oob_data(mtd, chip->oob_poi, page);
+ denali_oob_xfer(mtd, chip, page, 0);
return 0;
}
-static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dma_addr_t addr = denali->buf.dma_buf;
- size_t size = mtd->writesize + mtd->oobsize;
- uint32_t irq_status;
- uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
- INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
- INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
- unsigned long uncor_ecc_flags = 0;
- int stat = 0;
+ int status;
- if (page != denali->page) {
- dev_err(denali->dev,
- "IN %s: page %d is not equal to denali->page %d",
- __func__, page, denali->page);
- BUG();
- }
+ denali_reset_irq(denali);
- setup_ecc_for_xfer(denali, true, false);
+ denali_oob_xfer(mtd, chip, page, 1);
- denali_enable_dma(denali, true);
- dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
- clear_interrupts(denali);
- denali_setup_dma(denali, DENALI_READ);
-
- /* wait for operation to complete */
- irq_status = wait_for_irq(denali, irq_mask);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
- dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ unsigned long uncor_ecc_flags = 0;
+ int stat = 0;
+ int ret;
- memcpy(buf, denali->buf.buf, mtd->writesize);
+ ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
+ if (ret && ret != -EBADMSG)
+ return ret;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
- else if (irq_status & INTR__ECC_ERR)
+ else if (ret == -EBADMSG)
stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
- denali_enable_dma(denali, false);
if (stat < 0)
return stat;
if (uncor_ecc_flags) {
- read_oob_data(mtd, chip->oob_poi, denali->page);
+ ret = denali_read_oob(mtd, chip, page);
+ if (ret)
+ return ret;
stat = denali_check_erased_page(mtd, chip, buf,
uncor_ecc_flags, stat);
@@ -1181,137 +858,266 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return stat;
}
-static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dma_addr_t addr = denali->buf.dma_buf;
- size_t size = mtd->writesize + mtd->oobsize;
- uint32_t irq_mask = INTR__DMA_CMD_COMP;
-
- if (page != denali->page) {
- dev_err(denali->dev,
- "IN %s: page %d is not equal to denali->page %d",
- __func__, page, denali->page);
- BUG();
- }
-
- setup_ecc_for_xfer(denali, false, true);
- denali_enable_dma(denali, true);
-
- dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
-
- clear_interrupts(denali);
- denali_setup_dma(denali, DENALI_READ);
-
- /* wait for operation to complete */
- wait_for_irq(denali, irq_mask);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *dma_buf = denali->buf;
+ int oob_skip = denali->oob_skip_bytes;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
- dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+ /*
+	 * Fill the buffer with 0xff first unless this is a full-page
+	 * transfer. This simplifies the logic.
+ */
+ if (!buf || !oob_required)
+ memset(dma_buf, 0xff, size);
+
+ /* Arrange the buffer for syndrome payload/ecc layout */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(dma_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(dma_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
- denali_enable_dma(denali, false);
+ if (oob_required) {
+ const uint8_t *oob = chip->oob_poi;
+
+ /* BBM at the beginning of the OOB area */
+ memcpy(dma_buf + writesize, oob, oob_skip);
+ oob += oob_skip;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(dma_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(dma_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
- memcpy(buf, denali->buf.buf, mtd->writesize);
- memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+ /* OOB free */
+ len = oobsize - (oob - chip->oob_poi);
+ memcpy(dma_buf + size - len, oob, len);
+ }
- return 0;
+ return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
}
-static uint8_t denali_read_byte(struct mtd_info *mtd)
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint8_t result = 0xff;
-
- if (denali->buf.head < denali->buf.tail)
- result = denali->buf.buf[denali->buf.head++];
- return result;
+ return denali_data_xfer(denali, (void *)buf, mtd->writesize,
+ page, 0, 1);
}
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- spin_lock_irq(&denali->irq_lock);
- denali->flash_bank = chip;
- spin_unlock_irq(&denali->irq_lock);
+ denali->active_bank = chip;
}
static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- int status = denali->status;
+ uint32_t irq_status;
- denali->status = 0;
+ /* R/B# pin transitioned from low to high? */
+ irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
- return status;
+ return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}
static int denali_erase(struct mtd_info *mtd, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status;
- uint32_t cmd, irq_status;
-
- clear_interrupts(denali);
+ denali_reset_irq(denali);
- /* setup page read request for access type */
- cmd = MODE_10 | BANK(denali->flash_bank) | page;
- index_addr(denali, cmd, 0x1);
+ denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
/* wait for erase to complete or failure to occur */
- irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL);
+ irq_status = denali_wait_for_irq(denali,
+ INTR__ERASE_COMP | INTR__ERASE_FAIL);
- return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
+ return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
}
-static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
- int page)
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+
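+/*
+ * do_div() is used instead of a plain "/" so that the 64-bit division
+ * also builds on 32-bit platforms, where a plain 64-bit division would
+ * need compiler helper routines the kernel does not provide.
+ */
+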
+static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *conf)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t addr, id;
+ const struct nand_sdr_timings *timings;
+ unsigned long t_clk;
+ int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
+ int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
+ int addr_2_data_mask;
+ uint32_t tmp;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ /* clk_x period in picoseconds */
+ t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
+ if (!t_clk)
+ return -EINVAL;
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ /* tREA -> ACC_CLKS */
+ acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
+ acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+ tmp = ioread32(denali->reg + ACC_CLKS);
+ tmp &= ~ACC_CLKS__VALUE;
+ tmp |= acc_clks;
+ iowrite32(tmp, denali->reg + ACC_CLKS);
+
+	/* tRHW -> RE_2_WE */
+ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
+ re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_WE);
+ tmp &= ~RE_2_WE__VALUE;
+ tmp |= re_2_we;
+ iowrite32(tmp, denali->reg + RE_2_WE);
+
+ /* tRHZ -> RE_2_RE */
+ re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
+ re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_RE);
+ tmp &= ~RE_2_RE__VALUE;
+ tmp |= re_2_re;
+ iowrite32(tmp, denali->reg + RE_2_RE);
+
+ /* tWHR -> WE_2_RE */
+ we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+ we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
+
+ tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
+ tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
+ tmp |= we_2_re;
+ iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
+
+ /* tADL -> ADDR_2_DATA */
+
+	/* for older versions, ADDR_2_DATA is only 6 bits wide */
+ addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ if (denali->revision < 0x0501)
+ addr_2_data_mask >>= 1;
+
+ addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
+ addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
+
+ tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
+ tmp &= ~addr_2_data_mask;
+ tmp |= addr_2_data;
+ iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
+
+ /* tREH, tWH -> RDWR_EN_HI_CNT */
+ rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
+ t_clk);
+ rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
+ tmp &= ~RDWR_EN_HI_CNT__VALUE;
+ tmp |= rdwr_en_hi;
+ iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
+
+ /* tRP, tWP -> RDWR_EN_LO_CNT */
+ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
+ t_clk);
+ rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
+ t_clk);
+ rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
+ rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
+ rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
+ tmp &= ~RDWR_EN_LO_CNT__VALUE;
+ tmp |= rdwr_en_lo;
+ iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
+
+ /* tCS, tCEA -> CS_SETUP_CNT */
+ cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
+ (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
+ 0);
+ cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + CS_SETUP_CNT);
+ tmp &= ~CS_SETUP_CNT__VALUE;
+ tmp |= cs_setup;
+ iowrite32(tmp, denali->reg + CS_SETUP_CNT);
+
+ return 0;
+}
+
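+/*
+ * Worked example for the conversions above, assuming clk_x runs at
+ * 200 MHz: t_clk = 10^12 / 200000000 = 5000 ps, so a tREA_max of
+ * 20000 ps (SDR timings are expressed in picoseconds) gives
+ * ACC_CLKS = DIV_ROUND_UP(20000, 5000) = 4 cycles.
+ */
+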
+static void denali_reset_banks(struct denali_nand_info *denali)
+{
+ u32 irq_status;
int i;
- switch (cmd) {
- case NAND_CMD_PAGEPROG:
- break;
- case NAND_CMD_STATUS:
- read_status(denali);
- break;
- case NAND_CMD_READID:
- case NAND_CMD_PARAM:
- reset_buf(denali);
-		/*
-		 * Sometimes the manufacturer ID read from the register is not
-		 * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
-		 * so here we send the READID command to the NAND instead.
-		 */
- addr = MODE_11 | BANK(denali->flash_bank);
- index_addr(denali, addr | 0, 0x90);
- index_addr(denali, addr | 1, col);
- for (i = 0; i < 8; i++) {
- index_addr_read_data(denali, addr | 2, &id);
- write_byte_to_buf(denali, id);
- }
- break;
- case NAND_CMD_READ0:
- case NAND_CMD_SEQIN:
- denali->page = page;
- break;
- case NAND_CMD_RESET:
- reset_bank(denali);
- break;
- case NAND_CMD_READOOB:
- /* TODO: Read OOB data */
- break;
- default:
-		pr_err("unsupported command received 0x%x\n", cmd);
- break;
+ for (i = 0; i < denali->max_banks; i++) {
+ denali->active_bank = i;
+
+ denali_reset_irq(denali);
+
+ iowrite32(DEVICE_RESET__BANK(i),
+ denali->reg + DEVICE_RESET);
+
+ irq_status = denali_wait_for_irq(denali,
+ INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
+ if (!(irq_status & INTR__INT_ACT))
+ break;
}
+
+ dev_dbg(denali->dev, "%d chips connected\n", i);
+ denali->max_banks = i;
}
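+
+/*
+ * The reset loop doubles as chip detection: a bank that does not raise
+ * INTR__INT_ACT after DEVICE_RESET has no chip behind it, so max_banks
+ * is trimmed to the number of banks that actually responded.
+ */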
-/* end NAND core entry points */
-/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
/*
@@ -1319,8 +1125,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
* override it.
*/
if (!denali->revision)
- denali->revision =
- swab16(ioread32(denali->flash_reg + REVISION));
+ denali->revision = swab16(ioread32(denali->reg + REVISION));
/*
* tell driver how many bit controller will skip before
@@ -1328,30 +1133,51 @@ static void denali_hw_init(struct denali_nand_info *denali)
* set by firmware. So we read this value out.
* if this value is 0, just let it be.
*/
- denali->bbtskipbytes = ioread32(denali->flash_reg +
- SPARE_AREA_SKIP_BYTES);
+ denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
detect_max_banks(denali);
- denali_nand_reset(denali);
- iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG,
- denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+ iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
- iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+ iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
	/* Set values for these registers during init */
- iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, denali->flash_reg + ECC_ENABLE);
- denali_nand_timing_set(denali);
- denali_irq_init(denali);
+ iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(1, denali->reg + ECC_ENABLE);
}
-/*
- * Although the controller spec says SLC ECC is forced to be 4-bit,
- * the Denali controller in MRST only supports 15-bit and 8-bit ECC
- * correction.
- */
-#define ECC_8BITS 14
-#define ECC_15BITS 26
+int denali_calc_ecc_bytes(int step_size, int strength)
+{
+ /* BCH code. Denali requires ecc.bytes to be multiple of 2 */
+ return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
+}
+EXPORT_SYMBOL(denali_calc_ecc_bytes);
+
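+/*
+ * Example: with a 512-byte step, fls(512 * 8) = 13 bits per BCH symbol,
+ * so strength 8 needs DIV_ROUND_UP(8 * 13, 16) * 2 = 14 ECC bytes and
+ * strength 15 needs DIV_ROUND_UP(15 * 13, 16) * 2 = 26, matching the
+ * old ECC_8BITS (14) and ECC_15BITS (26) constants.
+ */
+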
+static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
+ struct denali_nand_info *denali)
+{
+ int oobavail = mtd->oobsize - denali->oob_skip_bytes;
+ int ret;
+
+ /*
+ * If .size and .strength are already set (usually by DT),
+ * check if they are supported by this controller.
+ */
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
+
+ /*
+ * We want .size and .strength closest to the chip's requirement
+ * unless NAND_ECC_MAXIMIZE is requested.
+ */
+ if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
+ ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
+ if (!ret)
+ return 0;
+ }
+
+ /* Max ECC strength is the last thing we can do */
+ return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
+}
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
@@ -1362,7 +1188,7 @@ static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
if (section)
return -ERANGE;
- oobregion->offset = denali->bbtskipbytes;
+ oobregion->offset = denali->oob_skip_bytes;
oobregion->length = chip->ecc.total;
return 0;
@@ -1377,7 +1203,7 @@ static int denali_ooblayout_free(struct mtd_info *mtd, int section,
if (section)
return -ERANGE;
- oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
+ oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
@@ -1388,29 +1214,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 8,
- .len = 4,
- .veroffs = 12,
- .maxblocks = 4,
- .pattern = bbt_pattern,
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 8,
- .len = 4,
- .veroffs = 12,
- .maxblocks = 4,
- .pattern = mirror_pattern,
-};
-
/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
@@ -1425,12 +1228,6 @@ static void denali_drv_init(struct denali_nand_info *denali)
	 * element that might access shared data (interrupt status)
*/
spin_lock_init(&denali->irq_lock);
-
- /* indicate that MTD has not selected a valid bank yet */
- denali->flash_bank = CHIP_SELECT_INVALID;
-
- /* initialize our irq_status variable to indicate no interrupts */
- denali->irq_status = 0;
}
static int denali_multidev_fixup(struct denali_nand_info *denali)
@@ -1445,23 +1242,23 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
* In this case, the core framework knows nothing about this fact,
* so we should tell it the _logical_ pagesize and anything necessary.
*/
- denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
/*
* On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left at 0. Set it to 1 in that case.
*/
- if (denali->devnum == 0) {
- denali->devnum = 1;
- iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
+ if (denali->devs_per_cs == 0) {
+ denali->devs_per_cs = 1;
+ iowrite32(1, denali->reg + DEVICES_CONNECTED);
}
- if (denali->devnum == 1)
+ if (denali->devs_per_cs == 1)
return 0;
- if (denali->devnum != 2) {
+ if (denali->devs_per_cs != 2) {
dev_err(denali->dev, "unsupported number of devices %d\n",
- denali->devnum);
+ denali->devs_per_cs);
return -EINVAL;
}
@@ -1479,7 +1276,7 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
chip->ecc.size <<= 1;
chip->ecc.bytes <<= 1;
chip->ecc.strength <<= 1;
- denali->bbtskipbytes <<= 1;
+ denali->oob_skip_bytes <<= 1;
return 0;
}
@@ -1490,27 +1287,12 @@ int denali_init(struct denali_nand_info *denali)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- if (denali->platform == INTEL_CE4100) {
- /*
- * Due to a silicon limitation, we can only support
- * ONFI timing mode 1 and below.
- */
- if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
- pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
- return -EINVAL;
- }
- }
-
- /* allocate a temporary buffer for nand_scan_ident() */
- denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
- GFP_DMA | GFP_KERNEL);
- if (!denali->buf.buf)
- return -ENOMEM;
-
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
+ denali_clear_irq_all(denali);
+
/* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
@@ -1519,8 +1301,11 @@ int denali_init(struct denali_nand_info *denali)
return ret;
}
- /* now that our ISR is registered, we can enable interrupts */
- denali_set_intr_modes(denali, true);
+ denali_enable_irq(denali);
+ denali_reset_banks(denali);
+
+ denali->active_bank = DENALI_INVALID_BANK;
+
nand_set_flash_node(chip, denali->dev->of_node);
/* Fallback to the default name if DT did not give "label" property */
if (!mtd->name)
@@ -1528,10 +1313,17 @@ int denali_init(struct denali_nand_info *denali)
/* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
- chip->cmdfunc = denali_cmdfunc;
chip->read_byte = denali_read_byte;
+ chip->write_byte = denali_write_byte;
+ chip->read_word = denali_read_word;
+ chip->cmd_ctrl = denali_cmd_ctrl;
+ chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
+ /* clk rate info is needed for setup_data_interface */
+ if (denali->clk_x_rate)
+ chip->setup_data_interface = denali_setup_data_interface;
+
/*
* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
@@ -1539,33 +1331,25 @@ int denali_init(struct denali_nand_info *denali)
*/
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
- goto failed_req_irq;
-
- /* allocate the right size buffer now */
- devm_kfree(denali->dev, denali->buf.buf);
- denali->buf.buf = devm_kzalloc(denali->dev,
- mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!denali->buf.buf) {
- ret = -ENOMEM;
- goto failed_req_irq;
- }
+ goto disable_irq;
- ret = dma_set_mask(denali->dev,
- DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
- 64 : 32));
- if (ret) {
- dev_err(denali->dev, "No usable DMA configuration\n");
- goto failed_req_irq;
+ if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
+ denali->dma_avail = 1;
+
+ if (denali->dma_avail) {
+ int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
+
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
+ if (ret) {
+ dev_info(denali->dev,
+ "Failed to set DMA mask. Disabling DMA.\n");
+ denali->dma_avail = 0;
+ }
}
- denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
- mtd->writesize + mtd->oobsize,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
- dev_err(denali->dev, "Failed to map DMA buffer\n");
- ret = -EIO;
- goto failed_req_irq;
+ if (denali->dma_avail) {
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->buf_align = 16;
}
/*
@@ -1574,46 +1358,49 @@ int denali_init(struct denali_nand_info *denali)
* bad block management.
*/
- /* Bad block management */
- chip->bbt_td = &bbt_main_descr;
- chip->bbt_md = &bbt_mirror_descr;
-
- /* skip the scan for now until we have OOB read and write support */
chip->bbt_options |= NAND_BBT_USE_FLASH;
- chip->options |= NAND_SKIP_BBTSCAN;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
/* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
-	/*
-	 * The Denali controller only supports 15-bit and 8-bit ECC in MRST,
-	 * so just let the controller do 15-bit ECC for MLC and 8-bit ECC
-	 * for SLC if possible.
-	 */
- if (!nand_is_slc(chip) &&
- (mtd->oobsize > (denali->bbtskipbytes +
- ECC_15BITS * (mtd->writesize /
- ECC_SECTOR_SIZE)))) {
- /* if MLC OOB size is large enough, use 15bit ECC*/
- chip->ecc.strength = 15;
- chip->ecc.bytes = ECC_15BITS;
- iowrite32(15, denali->flash_reg + ECC_CORRECTION);
- } else if (mtd->oobsize < (denali->bbtskipbytes +
- ECC_8BITS * (mtd->writesize /
- ECC_SECTOR_SIZE))) {
- pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
- goto failed_req_irq;
- } else {
- chip->ecc.strength = 8;
- chip->ecc.bytes = ECC_8BITS;
- iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+ ret = denali_ecc_setup(mtd, chip, denali);
+ if (ret) {
+ dev_err(denali->dev, "Failed to setup ECC settings.\n");
+ goto disable_irq;
}
+ dev_dbg(denali->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+ denali->reg + ECC_CORRECTION);
+ iowrite32(mtd->erasesize / mtd->writesize,
+ denali->reg + PAGES_PER_BLOCK);
+ iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
+ denali->reg + DEVICE_WIDTH);
+ iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+
+ iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
+ /* chip->ecc.steps is set by nand_scan_tail(); not available here */
+ iowrite32(mtd->writesize / chip->ecc.size,
+ denali->reg + CFG_NUM_DATA_BLOCKS);
+
mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
- /* override the default read operations */
- chip->ecc.size = ECC_SECTOR_SIZE;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->read_buf = denali_read_buf16;
+ chip->write_buf = denali_write_buf16;
+ } else {
+ chip->read_buf = denali_read_buf;
+ chip->write_buf = denali_write_buf;
+ }
+ chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page = denali_write_page;
@@ -1624,21 +1411,34 @@ int denali_init(struct denali_nand_info *denali)
ret = denali_multidev_fixup(denali);
if (ret)
- goto failed_req_irq;
+ goto disable_irq;
+
+ /*
+ * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
+ * use devm_kmalloc() because the memory allocated by devm_ does not
+ * guarantee DMA-safe alignment.
+ */
+ denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!denali->buf) {
+ ret = -ENOMEM;
+ goto disable_irq;
+ }
ret = nand_scan_tail(mtd);
if (ret)
- goto failed_req_irq;
+ goto free_buf;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
- goto failed_req_irq;
+ goto free_buf;
}
return 0;
-failed_req_irq:
- denali_irq_cleanup(denali->irq, denali);
+free_buf:
+ kfree(denali->buf);
+disable_irq:
+ denali_disable_irq(denali);
return ret;
}
@@ -1648,16 +1448,9 @@ EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
- /*
- * Pre-compute DMA buffer size to avoid any problems in case
- * nand_release() ever changes in a way that mtd->writesize and
- * mtd->oobsize are not reliable after this call.
- */
- int bufsize = mtd->writesize + mtd->oobsize;
nand_release(mtd);
- denali_irq_cleanup(denali->irq, denali);
- dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
- DMA_BIDIRECTIONAL);
+ kfree(denali->buf);
+ denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index ec004850652a..237cc706b0fb 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -24,330 +24,315 @@
#include <linux/mtd/nand.h>
#define DEVICE_RESET 0x0
-#define DEVICE_RESET__BANK0 0x0001
-#define DEVICE_RESET__BANK1 0x0002
-#define DEVICE_RESET__BANK2 0x0004
-#define DEVICE_RESET__BANK3 0x0008
+#define DEVICE_RESET__BANK(bank) BIT(bank)
#define TRANSFER_SPARE_REG 0x10
-#define TRANSFER_SPARE_REG__FLAG 0x0001
+#define TRANSFER_SPARE_REG__FLAG BIT(0)
#define LOAD_WAIT_CNT 0x20
-#define LOAD_WAIT_CNT__VALUE 0xffff
+#define LOAD_WAIT_CNT__VALUE GENMASK(15, 0)
#define PROGRAM_WAIT_CNT 0x30
-#define PROGRAM_WAIT_CNT__VALUE 0xffff
+#define PROGRAM_WAIT_CNT__VALUE GENMASK(15, 0)
#define ERASE_WAIT_CNT 0x40
-#define ERASE_WAIT_CNT__VALUE 0xffff
+#define ERASE_WAIT_CNT__VALUE GENMASK(15, 0)
#define INT_MON_CYCCNT 0x50
-#define INT_MON_CYCCNT__VALUE 0xffff
+#define INT_MON_CYCCNT__VALUE GENMASK(15, 0)
#define RB_PIN_ENABLED 0x60
-#define RB_PIN_ENABLED__BANK0 0x0001
-#define RB_PIN_ENABLED__BANK1 0x0002
-#define RB_PIN_ENABLED__BANK2 0x0004
-#define RB_PIN_ENABLED__BANK3 0x0008
+#define RB_PIN_ENABLED__BANK(bank) BIT(bank)
#define MULTIPLANE_OPERATION 0x70
-#define MULTIPLANE_OPERATION__FLAG 0x0001
+#define MULTIPLANE_OPERATION__FLAG BIT(0)
#define MULTIPLANE_READ_ENABLE 0x80
-#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+#define MULTIPLANE_READ_ENABLE__FLAG BIT(0)
#define COPYBACK_DISABLE 0x90
-#define COPYBACK_DISABLE__FLAG 0x0001
+#define COPYBACK_DISABLE__FLAG BIT(0)
#define CACHE_WRITE_ENABLE 0xa0
-#define CACHE_WRITE_ENABLE__FLAG 0x0001
+#define CACHE_WRITE_ENABLE__FLAG BIT(0)
#define CACHE_READ_ENABLE 0xb0
-#define CACHE_READ_ENABLE__FLAG 0x0001
+#define CACHE_READ_ENABLE__FLAG BIT(0)
#define PREFETCH_MODE 0xc0
-#define PREFETCH_MODE__PREFETCH_EN 0x0001
-#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+#define PREFETCH_MODE__PREFETCH_EN BIT(0)
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH GENMASK(15, 4)
#define CHIP_ENABLE_DONT_CARE 0xd0
-#define CHIP_EN_DONT_CARE__FLAG 0x01
+#define CHIP_EN_DONT_CARE__FLAG BIT(0)
#define ECC_ENABLE 0xe0
-#define ECC_ENABLE__FLAG 0x0001
+#define ECC_ENABLE__FLAG BIT(0)
#define GLOBAL_INT_ENABLE 0xf0
-#define GLOBAL_INT_EN_FLAG 0x01
+#define GLOBAL_INT_EN_FLAG BIT(0)
-#define WE_2_RE 0x100
-#define WE_2_RE__VALUE 0x003f
+#define TWHR2_AND_WE_2_RE 0x100
+#define TWHR2_AND_WE_2_RE__WE_2_RE GENMASK(5, 0)
+#define TWHR2_AND_WE_2_RE__TWHR2 GENMASK(13, 8)
-#define ADDR_2_DATA 0x110
-#define ADDR_2_DATA__VALUE 0x003f
+#define TCWAW_AND_ADDR_2_DATA 0x110
+/* The width of ADDR_2_DATA is 6 bits for the old IP, 7 bits for the new IP */
+#define TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA GENMASK(6, 0)
+#define TCWAW_AND_ADDR_2_DATA__TCWAW GENMASK(13, 8)
#define RE_2_WE 0x120
-#define RE_2_WE__VALUE 0x003f
+#define RE_2_WE__VALUE GENMASK(5, 0)
#define ACC_CLKS 0x130
-#define ACC_CLKS__VALUE 0x000f
+#define ACC_CLKS__VALUE GENMASK(3, 0)
#define NUMBER_OF_PLANES 0x140
-#define NUMBER_OF_PLANES__VALUE 0x0007
+#define NUMBER_OF_PLANES__VALUE GENMASK(2, 0)
#define PAGES_PER_BLOCK 0x150
-#define PAGES_PER_BLOCK__VALUE 0xffff
+#define PAGES_PER_BLOCK__VALUE GENMASK(15, 0)
#define DEVICE_WIDTH 0x160
-#define DEVICE_WIDTH__VALUE 0x0003
+#define DEVICE_WIDTH__VALUE GENMASK(1, 0)
#define DEVICE_MAIN_AREA_SIZE 0x170
-#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+#define DEVICE_MAIN_AREA_SIZE__VALUE GENMASK(15, 0)
#define DEVICE_SPARE_AREA_SIZE 0x180
-#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+#define DEVICE_SPARE_AREA_SIZE__VALUE GENMASK(15, 0)
#define TWO_ROW_ADDR_CYCLES 0x190
-#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+#define TWO_ROW_ADDR_CYCLES__FLAG BIT(0)
#define MULTIPLANE_ADDR_RESTRICT 0x1a0
-#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+#define MULTIPLANE_ADDR_RESTRICT__FLAG BIT(0)
#define ECC_CORRECTION 0x1b0
-#define ECC_CORRECTION__VALUE 0x001f
+#define ECC_CORRECTION__VALUE GENMASK(4, 0)
+#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
+#define MAKE_ECC_CORRECTION(val, thresh) \
+ (((val) & (ECC_CORRECTION__VALUE)) | \
+ (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
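/*
 * Editor's worked example: the init path above calls
 * MAKE_ECC_CORRECTION(chip->ecc.strength, 1). For strength = 8:
 *   (8 & GENMASK(4, 0))           = 0x00000008
 *   ((1 << 16) & GENMASK(31, 16)) = 0x00010000
 *   result                        = 0x00010008
 * i.e. 8-bit correction with an erase threshold of one bitflip.
 */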
#define READ_MODE 0x1c0
-#define READ_MODE__VALUE 0x000f
+#define READ_MODE__VALUE GENMASK(3, 0)
#define WRITE_MODE 0x1d0
-#define WRITE_MODE__VALUE 0x000f
+#define WRITE_MODE__VALUE GENMASK(3, 0)
#define COPYBACK_MODE 0x1e0
-#define COPYBACK_MODE__VALUE 0x000f
+#define COPYBACK_MODE__VALUE GENMASK(3, 0)
#define RDWR_EN_LO_CNT 0x1f0
-#define RDWR_EN_LO_CNT__VALUE 0x001f
+#define RDWR_EN_LO_CNT__VALUE GENMASK(4, 0)
#define RDWR_EN_HI_CNT 0x200
-#define RDWR_EN_HI_CNT__VALUE 0x001f
+#define RDWR_EN_HI_CNT__VALUE GENMASK(4, 0)
#define MAX_RD_DELAY 0x210
-#define MAX_RD_DELAY__VALUE 0x000f
+#define MAX_RD_DELAY__VALUE GENMASK(3, 0)
#define CS_SETUP_CNT 0x220
-#define CS_SETUP_CNT__VALUE 0x001f
+#define CS_SETUP_CNT__VALUE GENMASK(4, 0)
+#define CS_SETUP_CNT__TWB GENMASK(17, 12)
#define SPARE_AREA_SKIP_BYTES 0x230
-#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+#define SPARE_AREA_SKIP_BYTES__VALUE GENMASK(5, 0)
#define SPARE_AREA_MARKER 0x240
-#define SPARE_AREA_MARKER__VALUE 0xffff
+#define SPARE_AREA_MARKER__VALUE GENMASK(15, 0)
#define DEVICES_CONNECTED 0x250
-#define DEVICES_CONNECTED__VALUE 0x0007
+#define DEVICES_CONNECTED__VALUE GENMASK(2, 0)
#define DIE_MASK 0x260
-#define DIE_MASK__VALUE 0x00ff
+#define DIE_MASK__VALUE GENMASK(7, 0)
#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
-#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE GENMASK(15, 0)
#define WRITE_PROTECT 0x280
-#define WRITE_PROTECT__FLAG 0x0001
+#define WRITE_PROTECT__FLAG BIT(0)
#define RE_2_RE 0x290
-#define RE_2_RE__VALUE 0x003f
+#define RE_2_RE__VALUE GENMASK(5, 0)
#define MANUFACTURER_ID 0x300
-#define MANUFACTURER_ID__VALUE 0x00ff
+#define MANUFACTURER_ID__VALUE GENMASK(7, 0)
#define DEVICE_ID 0x310
-#define DEVICE_ID__VALUE 0x00ff
+#define DEVICE_ID__VALUE GENMASK(7, 0)
#define DEVICE_PARAM_0 0x320
-#define DEVICE_PARAM_0__VALUE 0x00ff
+#define DEVICE_PARAM_0__VALUE GENMASK(7, 0)
#define DEVICE_PARAM_1 0x330
-#define DEVICE_PARAM_1__VALUE 0x00ff
+#define DEVICE_PARAM_1__VALUE GENMASK(7, 0)
#define DEVICE_PARAM_2 0x340
-#define DEVICE_PARAM_2__VALUE 0x00ff
+#define DEVICE_PARAM_2__VALUE GENMASK(7, 0)
#define LOGICAL_PAGE_DATA_SIZE 0x350
-#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+#define LOGICAL_PAGE_DATA_SIZE__VALUE GENMASK(15, 0)
#define LOGICAL_PAGE_SPARE_SIZE 0x360
-#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE GENMASK(15, 0)
#define REVISION 0x370
-#define REVISION__VALUE 0xffff
+#define REVISION__VALUE GENMASK(15, 0)
#define ONFI_DEVICE_FEATURES 0x380
-#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+#define ONFI_DEVICE_FEATURES__VALUE GENMASK(5, 0)
#define ONFI_OPTIONAL_COMMANDS 0x390
-#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+#define ONFI_OPTIONAL_COMMANDS__VALUE GENMASK(5, 0)
#define ONFI_TIMING_MODE 0x3a0
-#define ONFI_TIMING_MODE__VALUE 0x003f
+#define ONFI_TIMING_MODE__VALUE GENMASK(5, 0)
#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
-#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE GENMASK(5, 0)
#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
-#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
-#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS GENMASK(7, 0)
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE BIT(8)
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE GENMASK(15, 0)
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
-
-#define FEATURES 0x3f0
-#define FEATURES__N_BANKS 0x0003
-#define FEATURES__ECC_MAX_ERR 0x003c
-#define FEATURES__DMA 0x0040
-#define FEATURES__CMD_DMA 0x0080
-#define FEATURES__PARTITION 0x0100
-#define FEATURES__XDMA_SIDEBAND 0x0200
-#define FEATURES__GPREG 0x0400
-#define FEATURES__INDEX_ADDR 0x0800
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE GENMASK(15, 0)
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS GENMASK(1, 0)
+#define FEATURES__ECC_MAX_ERR GENMASK(5, 2)
+#define FEATURES__DMA BIT(6)
+#define FEATURES__CMD_DMA BIT(7)
+#define FEATURES__PARTITION BIT(8)
+#define FEATURES__XDMA_SIDEBAND BIT(9)
+#define FEATURES__GPREG BIT(10)
+#define FEATURES__INDEX_ADDR BIT(11)
#define TRANSFER_MODE 0x400
-#define TRANSFER_MODE__VALUE 0x0003
+#define TRANSFER_MODE__VALUE GENMASK(1, 0)
-#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
-#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
+#define INTR_STATUS(bank) (0x410 + (bank) * 0x50)
+#define INTR_EN(bank) (0x420 + (bank) * 0x50)
/* bit[1:0] is used differently depending on IP version */
-#define INTR__ECC_UNCOR_ERR 0x0001 /* new IP */
-#define INTR__ECC_TRANSACTION_DONE 0x0001 /* old IP */
-#define INTR__ECC_ERR 0x0002 /* old IP */
-#define INTR__DMA_CMD_COMP 0x0004
-#define INTR__TIME_OUT 0x0008
-#define INTR__PROGRAM_FAIL 0x0010
-#define INTR__ERASE_FAIL 0x0020
-#define INTR__LOAD_COMP 0x0040
-#define INTR__PROGRAM_COMP 0x0080
-#define INTR__ERASE_COMP 0x0100
-#define INTR__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR__LOCKED_BLK 0x0400
-#define INTR__UNSUP_CMD 0x0800
-#define INTR__INT_ACT 0x1000
-#define INTR__RST_COMP 0x2000
-#define INTR__PIPE_CMD_ERR 0x4000
-#define INTR__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
-#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
-#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
+#define INTR__ECC_UNCOR_ERR BIT(0) /* new IP */
+#define INTR__ECC_TRANSACTION_DONE BIT(0) /* old IP */
+#define INTR__ECC_ERR BIT(1) /* old IP */
+#define INTR__DMA_CMD_COMP BIT(2)
+#define INTR__TIME_OUT BIT(3)
+#define INTR__PROGRAM_FAIL BIT(4)
+#define INTR__ERASE_FAIL BIT(5)
+#define INTR__LOAD_COMP BIT(6)
+#define INTR__PROGRAM_COMP BIT(7)
+#define INTR__ERASE_COMP BIT(8)
+#define INTR__PIPE_CPYBCK_CMD_COMP BIT(9)
+#define INTR__LOCKED_BLK BIT(10)
+#define INTR__UNSUP_CMD BIT(11)
+#define INTR__INT_ACT BIT(12)
+#define INTR__RST_COMP BIT(13)
+#define INTR__PIPE_CMD_ERR BIT(14)
+#define INTR__PAGE_XFER_INC BIT(15)
+#define INTR__ERASED_PAGE BIT(16)
+
+#define PAGE_CNT(bank) (0x430 + (bank) * 0x50)
+#define ERR_PAGE_ADDR(bank) (0x440 + (bank) * 0x50)
+#define ERR_BLOCK_ADDR(bank) (0x450 + (bank) * 0x50)
#define ECC_THRESHOLD 0x600
-#define ECC_THRESHOLD__VALUE 0x03ff
+#define ECC_THRESHOLD__VALUE GENMASK(9, 0)
#define ECC_ERROR_BLOCK_ADDRESS 0x610
-#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE GENMASK(15, 0)
#define ECC_ERROR_PAGE_ADDRESS 0x620
-#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
-#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+#define ECC_ERROR_PAGE_ADDRESS__VALUE GENMASK(11, 0)
+#define ECC_ERROR_PAGE_ADDRESS__BANK GENMASK(15, 12)
#define ECC_ERROR_ADDRESS 0x630
-#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
-#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
+#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
-#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
-#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
-#define ECC_COR_INFO__MAX_ERRORS 0x007f
-#define ECC_COR_INFO__UNCOR_ERR 0x0080
+#define ECC_COR_INFO__MAX_ERRORS GENMASK(6, 0)
+#define ECC_COR_INFO__UNCOR_ERR BIT(7)
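/*
 * Editor's worked example: two banks share each ECC_COR_INFO register,
 * one byte per bank. For bank 3:
 *   ECC_COR_INFO(3)        = 0x650 + 3 / 2 * 0x10 = 0x660
 *   ECC_COR_INFO__SHIFT(3) = 3 % 2 * 8            = 8
 * so its error count is (readl(reg + 0x660) >> 8) & ECC_COR_INFO__MAX_ERRORS.
 */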
+
+#define CFG_DATA_BLOCK_SIZE 0x6b0
+
+#define CFG_LAST_DATA_BLOCK_SIZE 0x6c0
+
+#define CFG_NUM_DATA_BLOCKS 0x6d0
+
+#define CFG_META_DATA_SIZE 0x6e0
#define DMA_ENABLE 0x700
-#define DMA_ENABLE__FLAG 0x0001
+#define DMA_ENABLE__FLAG BIT(0)
#define IGNORE_ECC_DONE 0x710
-#define IGNORE_ECC_DONE__FLAG 0x0001
+#define IGNORE_ECC_DONE__FLAG BIT(0)
#define DMA_INTR 0x720
#define DMA_INTR_EN 0x730
-#define DMA_INTR__TARGET_ERROR 0x0001
-#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+#define DMA_INTR__TARGET_ERROR BIT(0)
+#define DMA_INTR__DESC_COMP_CHANNEL0 BIT(1)
+#define DMA_INTR__DESC_COMP_CHANNEL1 BIT(2)
+#define DMA_INTR__DESC_COMP_CHANNEL2 BIT(3)
+#define DMA_INTR__DESC_COMP_CHANNEL3 BIT(4)
+#define DMA_INTR__MEMCOPY_DESC_COMP BIT(5)
#define TARGET_ERR_ADDR_LO 0x740
-#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+#define TARGET_ERR_ADDR_LO__VALUE GENMASK(15, 0)
#define TARGET_ERR_ADDR_HI 0x750
-#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+#define TARGET_ERR_ADDR_HI__VALUE GENMASK(15, 0)
#define CHNL_ACTIVE 0x760
-#define CHNL_ACTIVE__CHANNEL0 0x0001
-#define CHNL_ACTIVE__CHANNEL1 0x0002
-#define CHNL_ACTIVE__CHANNEL2 0x0004
-#define CHNL_ACTIVE__CHANNEL3 0x0008
-
-#define FAIL 1 /*failed flag*/
-#define PASS 0 /*success flag*/
-
-#define CLK_X 5
-#define CLK_MULTI 4
-
-#define ONFI_BLOOM_TIME 1
-#define MODE5_WORKAROUND 0
-
-
-#define MODE_00 0x00000000
-#define MODE_01 0x04000000
-#define MODE_10 0x08000000
-#define MODE_11 0x0C000000
-
-#define ECC_SECTOR_SIZE 512
-
-struct nand_buf {
- int head;
- int tail;
- uint8_t *buf;
- dma_addr_t dma_buf;
-};
-
-#define INTEL_CE4100 1
-#define INTEL_MRST 2
-#define DT 3
+#define CHNL_ACTIVE__CHANNEL0 BIT(0)
+#define CHNL_ACTIVE__CHANNEL1 BIT(1)
+#define CHNL_ACTIVE__CHANNEL2 BIT(2)
+#define CHNL_ACTIVE__CHANNEL3 BIT(3)
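/*
 * Editor's sketch: the BIT()/GENMASK() conversion above is purely
 * mechanical. With the definitions reproduced here for a self-contained
 * check (assuming a 64-bit unsigned long), the new macros expand to the
 * old literal masks:
 */
#define EX_BIT(n)        (1UL << (n))
#define EX_GENMASK(h, l) (((~0UL) >> (63 - (h))) & ((~0UL) << (l)))

_Static_assert(EX_GENMASK(15, 0) == 0xffff, "16-bit __VALUE masks");
_Static_assert(EX_GENMASK(5, 0)  == 0x003f, "6-bit timing masks");
_Static_assert(EX_BIT(7)         == 0x0080, "single-bit flags");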
struct denali_nand_info {
struct nand_chip nand;
- int flash_bank; /* currently selected chip */
- int status;
- int platform;
- struct nand_buf buf;
+ unsigned long clk_x_rate; /* bus interface clock rate */
+ int active_bank; /* currently selected bank */
struct device *dev;
- int total_used_banks;
- int page;
- void __iomem *flash_reg; /* Register Interface */
- void __iomem *flash_mem; /* Host Data/Command Interface */
+ void __iomem *reg; /* Register Interface */
+ void __iomem *host; /* Host Data/Command Interface */
/* elements used by ISR */
struct completion complete;
spinlock_t irq_lock;
+ uint32_t irq_mask;
uint32_t irq_status;
int irq;
- int devnum; /* represent how many nands connected */
- int bbtskipbytes;
+ void *buf;
+ dma_addr_t dma_addr;
+ int dma_avail;
+ int devs_per_cs; /* devices connected in parallel */
+ int oob_skip_bytes;
int max_banks;
unsigned int revision;
unsigned int caps;
+ const struct nand_ecc_caps *ecc_caps;
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
#define DENALI_CAP_DMA_64BIT BIT(1)
+int denali_calc_ecc_bytes(int step_size, int strength);
extern int denali_init(struct denali_nand_info *denali);
extern void denali_remove(struct denali_nand_info *denali);
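/*
 * Editor's sketch (the definition lives outside this hunk): for a BCH
 * code over GF(2^m) with m = fls(8 * step_size), each bit of correction
 * strength costs m parity bits, and the Denali IP pads ecc.bytes to an
 * even count. A plausible implementation consistent with the ecc_caps
 * tables registered by the DT/PCI front ends:
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/*
	 * e.g. step_size = 512, strength = 8: fls(4096) = 13,
	 * DIV_ROUND_UP(8 * 13, 16) * 2 = 14 bytes of parity per step.
	 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}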
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index df9ef36cc2ce..47f398edf18f 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -32,10 +32,31 @@ struct denali_dt {
struct denali_dt_data {
unsigned int revision;
unsigned int caps;
+ const struct nand_ecc_caps *ecc_caps;
};
+NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
+ 512, 8, 15);
static const struct denali_dt_data denali_socfpga_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP,
+ .ecc_caps = &denali_socfpga_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16, 24);
+static const struct denali_dt_data denali_uniphier_v5a_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .ecc_caps = &denali_uniphier_v5a_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16);
+static const struct denali_dt_data denali_uniphier_v5b_data = {
+ .revision = 0x0501,
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .ecc_caps = &denali_uniphier_v5b_ecc_caps,
};
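/*
 * Editor's note: NAND_ECC_CAPS_SINGLE(name, calc, step, ...) is assumed
 * to declare a one-entry nand_ecc_caps table, roughly:
 *
 *   static const int name_strengths[] = { ... };
 *   static const struct nand_ecc_step_info name_stepinfo = {
 *           .stepsize   = step,
 *           .strengths  = name_strengths,
 *           .nstrengths = ARRAY_SIZE(name_strengths),
 *   };
 *   static const struct nand_ecc_caps name = {
 *           .stepinfos      = &name_stepinfo,
 *           .nstepinfos     = 1,
 *           .calc_ecc_bytes = calc,
 *   };
 *
 * so denali_uniphier_v5a_ecc_caps above advertises a 1024-byte step with
 * strengths 8, 16 and 24, from which denali_ecc_setup() picks a setting.
 */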
static const struct of_device_id denali_nand_dt_ids[] = {
@@ -43,13 +64,21 @@ static const struct of_device_id denali_nand_dt_ids[] = {
.compatible = "altr,socfpga-denali-nand",
.data = &denali_socfpga_data,
},
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5a",
+ .data = &denali_uniphier_v5a_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5b",
+ .data = &denali_uniphier_v5b_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
static int denali_dt_probe(struct platform_device *pdev)
{
- struct resource *denali_reg, *nand_data;
+ struct resource *res;
struct denali_dt *dt;
const struct denali_dt_data *data;
struct denali_nand_info *denali;
@@ -64,9 +93,9 @@ static int denali_dt_probe(struct platform_device *pdev)
if (data) {
denali->revision = data->revision;
denali->caps = data->caps;
+ denali->ecc_caps = data->ecc_caps;
}
- denali->platform = DT;
denali->dev = &pdev->dev;
denali->irq = platform_get_irq(pdev, 0);
if (denali->irq < 0) {
@@ -74,17 +103,15 @@ static int denali_dt_probe(struct platform_device *pdev)
return denali->irq;
}
- denali_reg = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "denali_reg");
- denali->flash_reg = devm_ioremap_resource(&pdev->dev, denali_reg);
- if (IS_ERR(denali->flash_reg))
- return PTR_ERR(denali->flash_reg);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
+ denali->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(denali->reg))
+ return PTR_ERR(denali->reg);
- nand_data = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "nand_data");
- denali->flash_mem = devm_ioremap_resource(&pdev->dev, nand_data);
- if (IS_ERR(denali->flash_mem))
- return PTR_ERR(denali->flash_mem);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ denali->host = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(denali->host))
+ return PTR_ERR(denali->host);
dt->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dt->clk)) {
@@ -93,6 +120,8 @@ static int denali_dt_probe(struct platform_device *pdev)
}
clk_prepare_enable(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk);
+
ret = denali_init(denali);
if (ret)
goto out_disable_clk;
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index ac843238b77e..81370c79aa48 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -19,6 +19,9 @@
#define DENALI_NAND_NAME "denali-nand-pci"
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
/* List of platforms this NAND controller has been integrated into */
static const struct pci_device_id denali_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
@@ -27,6 +30,8 @@ static const struct pci_device_id denali_pci_ids[] = {
};
MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15);
+
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
@@ -45,13 +50,11 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
if (id->driver_data == INTEL_CE4100) {
- denali->platform = INTEL_CE4100;
mem_base = pci_resource_start(dev, 0);
mem_len = pci_resource_len(dev, 1);
csr_base = pci_resource_start(dev, 1);
csr_len = pci_resource_len(dev, 1);
} else {
- denali->platform = INTEL_MRST;
csr_base = pci_resource_start(dev, 0);
csr_len = pci_resource_len(dev, 0);
mem_base = pci_resource_start(dev, 1);
@@ -65,6 +68,9 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_master(dev);
denali->dev = &dev->dev;
denali->irq = dev->irq;
+ denali->ecc_caps = &denali_pci_ecc_caps;
+ denali->nand.ecc.options |= NAND_ECC_MAXIMIZE;
+ denali->clk_x_rate = 200000000; /* 200 MHz */
ret = pci_request_regions(dev, DENALI_NAND_NAME);
if (ret) {
@@ -72,14 +78,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret;
}
- denali->flash_reg = ioremap_nocache(csr_base, csr_len);
- if (!denali->flash_reg) {
+ denali->reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
- denali->flash_mem = ioremap_nocache(mem_base, mem_len);
- if (!denali->flash_mem) {
+ denali->host = ioremap_nocache(mem_base, mem_len);
+ if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
ret = -ENOMEM;
goto failed_remap_reg;
@@ -94,9 +100,9 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
failed_remap_mem:
- iounmap(denali->flash_mem);
+ iounmap(denali->host);
failed_remap_reg:
- iounmap(denali->flash_reg);
+ iounmap(denali->reg);
return ret;
}
@@ -106,8 +112,8 @@ static void denali_pci_remove(struct pci_dev *dev)
struct denali_nand_info *denali = pci_get_drvdata(dev);
denali_remove(denali);
- iounmap(denali->flash_reg);
- iounmap(denali->flash_mem);
+ iounmap(denali->reg);
+ iounmap(denali->host);
}
static struct pci_driver denali_pci_driver = {
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 7af2a3cd949e..a27a84fbfb84 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1260,6 +1260,8 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->read_buf = docg4_read_buf;
nand->write_buf = docg4_write_buf16;
nand->erase = docg4_erase_block;
+ nand->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ nand->onfi_get_features = nand_onfi_get_set_features_notsupp;
nand->ecc.read_page = docg4_read_page;
nand->ecc.write_page = docg4_write_page;
nand->ecc.read_page_raw = docg4_read_page_raw;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 113f76e59937..b9ac16f05057 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -775,6 +775,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->select_chip = fsl_elbc_select_chip;
chip->cmdfunc = fsl_elbc_cmdfunc;
chip->waitfunc = fsl_elbc_wait;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index d1570f512f0b..59408ec2c69f 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -171,34 +171,6 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
ifc_nand_ctrl->index += mtd->writesize;
}
-static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
- u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
- u32 __iomem *mainarea = (u32 __iomem *)addr;
- u8 __iomem *oob = addr + mtd->writesize;
- struct mtd_oob_region oobregion = { };
- int i, section = 0;
-
- for (i = 0; i < mtd->writesize / 4; i++) {
- if (__raw_readl(&mainarea[i]) != 0xffffffff)
- return 0;
- }
-
- mtd_ooblayout_ecc(mtd, section++, &oobregion);
- while (oobregion.length) {
- for (i = 0; i < oobregion.length; i++) {
- if (__raw_readb(&oob[oobregion.offset + i]) != 0xff)
- return 0;
- }
-
- mtd_ooblayout_ecc(mtd, section++, &oobregion);
- }
-
- return 1;
-}
-
/* returns nonzero if entire page is blank */
static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
u32 *eccstat, unsigned int bufnum)
@@ -274,16 +246,14 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
if (errors == 15) {
/*
* Uncorrectable error.
- * OK only if the whole page is blank.
+ * We'll check for blank pages later.
*
* We disable ECCER reporting due to...
* erratum IFC-A002770 -- so report it now if we
* see an uncorrectable error in ECCSTAT.
*/
- if (!is_blank(mtd, bufnum))
- ctrl->nand_stat |=
- IFC_NAND_EVTER_STAT_ECCER;
- break;
+ ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER;
+ continue;
}
mtd->ecc_stats.corrected += errors;
@@ -678,6 +648,39 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
return nand_fsr | NAND_STATUS_WP;
}
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *ecc = chip->oob_poi;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, bitflips = 0;
+ struct mtd_oob_region oobregion = { };
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ ecc += oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; ++i) {
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ NULL, 0,
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += res;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
+
static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
@@ -689,8 +692,12 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
if (oob_required)
fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
- if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
- dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
+ if (!oob_required)
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return check_erased_page(chip, buf);
+ }
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
mtd->ecc_stats.failed++;
@@ -831,6 +838,8 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->select_chip = fsl_ifc_select_chip;
chip->cmdfunc = fsl_ifc_cmdfunc;
chip->waitfunc = fsl_ifc_wait;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
@@ -904,7 +913,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.algo = NAND_ECC_HAMMING;
}
- if (ctrl->version == FSL_IFC_VERSION_1_1_0)
+ if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
return 0;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index cea50d2f218c..9d8b051d3187 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -302,25 +302,13 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
* This routine initializes timing parameters related to NAND memory access in
* FSMC registers
*/
-static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
- uint32_t busw, struct fsmc_nand_timings *timings)
+static void fsmc_nand_setup(struct fsmc_nand_data *host,
+ struct fsmc_nand_timings *tims)
{
uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
uint32_t tclr, tar, thiz, thold, twait, tset;
- struct fsmc_nand_timings *tims;
- struct fsmc_nand_timings default_timings = {
- .tclr = FSMC_TCLR_1,
- .tar = FSMC_TAR_1,
- .thiz = FSMC_THIZ_1,
- .thold = FSMC_THOLD_4,
- .twait = FSMC_TWAIT_6,
- .tset = FSMC_TSET_0,
- };
-
- if (timings)
- tims = timings;
- else
- tims = &default_timings;
+ unsigned int bank = host->bank;
+ void __iomem *regs = host->regs_va;
tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
@@ -329,7 +317,7 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
- if (busw)
+ if (host->nand.options & NAND_BUSWIDTH_16)
writel_relaxed(value | FSMC_DEVWID_16,
FSMC_NAND_REG(regs, bank, PC));
else
@@ -344,6 +332,87 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
FSMC_NAND_REG(regs, bank, ATTRIB));
}
+static int fsmc_calc_timings(struct fsmc_nand_data *host,
+ const struct nand_sdr_timings *sdrt,
+ struct fsmc_nand_timings *tims)
+{
+ unsigned long hclk = clk_get_rate(host->clk);
+ unsigned long hclkn = NSEC_PER_SEC / hclk;
+ uint32_t thiz, thold, twait, tset;
+
+ if (sdrt->tRC_min < 30000)
+ return -EOPNOTSUPP;
+
+ tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
+ if (tims->tar > FSMC_TAR_MASK)
+ tims->tar = FSMC_TAR_MASK;
+ tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
+ if (tims->tclr > FSMC_TCLR_MASK)
+ tims->tclr = FSMC_TCLR_MASK;
+
+ thiz = sdrt->tCS_min - sdrt->tWP_min;
+ tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);
+
+ thold = sdrt->tDH_min;
+ if (thold < sdrt->tCH_min)
+ thold = sdrt->tCH_min;
+ if (thold < sdrt->tCLH_min)
+ thold = sdrt->tCLH_min;
+ if (thold < sdrt->tWH_min)
+ thold = sdrt->tWH_min;
+ if (thold < sdrt->tALH_min)
+ thold = sdrt->tALH_min;
+ if (thold < sdrt->tREH_min)
+ thold = sdrt->tREH_min;
+ tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
+ if (tims->thold == 0)
+ tims->thold = 1;
+ else if (tims->thold > FSMC_THOLD_MASK)
+ tims->thold = FSMC_THOLD_MASK;
+
+ twait = max(sdrt->tRP_min, sdrt->tWP_min);
+ tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FSMC_TWAIT_MASK)
+ tims->twait = FSMC_TWAIT_MASK;
+
+ tset = max(sdrt->tCS_min - sdrt->tWP_min,
+ sdrt->tCEA_max - sdrt->tREA_max);
+ tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
+ if (tims->tset == 0)
+ tims->tset = 1;
+ else if (tims->tset > FSMC_TSET_MASK)
+ tims->tset = FSMC_TSET_MASK;
+
+ return 0;
+}
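/*
 * Editor's worked example (assumed clock): with hclk = 166 MHz,
 * hclkn = NSEC_PER_SEC / hclk = 6 ns per cycle (integer division).
 * For ONFI SDR timings with tWP_min = tRP_min = 50000 ps:
 *   twait       = max(50000, 50000)                 = 50000 ps
 *   tims->twait = DIV_ROUND_UP(50000 / 1000, 6) - 1 = 9 - 1 = 8 cycles
 * The tRC_min < 30000 check above rejects interfaces whose read cycle
 * is shorter than 30 ns, which this controller cannot honour.
 */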
+
+static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct fsmc_nand_data *host = nand_get_controller_data(nand);
+ struct fsmc_nand_timings tims;
+ const struct nand_sdr_timings *sdrt;
+ int ret;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ ret = fsmc_calc_timings(host, sdrt, &tims);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ fsmc_nand_setup(host, &tims);
+
+ return 0;
+}
+
/*
* fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
*/
@@ -796,10 +865,8 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
return -ENOMEM;
ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
sizeof(*host->dev_timings));
- if (ret) {
- dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
+ if (ret)
host->dev_timings = NULL;
- }
/* Set default NAND bank to 0 */
host->bank = 0;
@@ -933,9 +1000,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
break;
}
- fsmc_nand_setup(host->regs_va, host->bank,
- nand->options & NAND_BUSWIDTH_16,
- host->dev_timings);
+ if (host->dev_timings)
+ fsmc_nand_setup(host, host->dev_timings);
+ else
+ nand->setup_data_interface = fsmc_setup_data_interface;
if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
@@ -986,6 +1054,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
break;
}
+ case NAND_ECC_ON_DIE:
+ break;
+
default:
dev_err(&pdev->dev, "Unsupported ECC mode!\n");
goto err_probe;
@@ -1073,9 +1144,8 @@ static int fsmc_nand_resume(struct device *dev)
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host) {
clk_prepare_enable(host->clk);
- fsmc_nand_setup(host->regs_va, host->bank,
- host->nand.options & NAND_BUSWIDTH_16,
- host->dev_timings);
+ if (host->dev_timings)
+ fsmc_nand_setup(host, host->dev_timings);
}
return 0;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 141bd70a49c2..97787246af41 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -26,7 +26,7 @@
#include "gpmi-regs.h"
#include "bch-regs.h"
-static struct timing_threshod timing_default_threshold = {
+static struct timing_threshold timing_default_threshold = {
.max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
BP_GPMI_TIMING0_DATA_SETUP),
.internal_data_setup_in_ns = 0,
@@ -329,7 +329,7 @@ static unsigned int ns_to_cycles(unsigned int time,
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
struct gpmi_nfc_hardware_timing *hw)
{
- struct timing_threshod *nfc = &timing_default_threshold;
+ struct timing_threshold *nfc = &timing_default_threshold;
struct resources *r = &this->resources;
struct nand_chip *nand = &this->nand;
struct nand_timing target = this->timing;
@@ -932,7 +932,7 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
nand->select_chip(mtd, 0);
- /* [1] send SET FEATURE commond to NAND */
+ /* [1] send SET FEATURE command to NAND */
feature[0] = mode;
ret = nand->onfi_set_features(mtd, nand,
ONFI_FEATURE_ADDR_TIMING_MODE, feature);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d52139635b67..50f8d4a1b983 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -82,6 +82,10 @@ static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
+static const char * const gpmi_clks_for_mx2x[] = {
+ "gpmi_io",
+};
+
static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
.ecc = gpmi_ooblayout_ecc,
.free = gpmi_ooblayout_free,
@@ -91,24 +95,48 @@ static const struct gpmi_devdata gpmi_devdata_imx23 = {
.type = IS_MX23,
.bch_max_ecc_strength = 20,
.max_chain_delay = 16,
+ .clks = gpmi_clks_for_mx2x,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};
static const struct gpmi_devdata gpmi_devdata_imx28 = {
.type = IS_MX28,
.bch_max_ecc_strength = 20,
.max_chain_delay = 16,
+ .clks = gpmi_clks_for_mx2x,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const char * const gpmi_clks_for_mx6[] = {
+ "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
static const struct gpmi_devdata gpmi_devdata_imx6q = {
.type = IS_MX6Q,
.bch_max_ecc_strength = 40,
.max_chain_delay = 12,
+ .clks = gpmi_clks_for_mx6,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
static const struct gpmi_devdata gpmi_devdata_imx6sx = {
.type = IS_MX6SX,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12,
+ .clks = gpmi_clks_for_mx6,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const char * const gpmi_clks_for_mx7d[] = {
+ "gpmi_io", "gpmi_bch_apb",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx7d = {
+ .type = IS_MX7D,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12,
+ .clks = gpmi_clks_for_mx7d,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
static irqreturn_t bch_irq(int irq, void *cookie)
@@ -599,35 +627,14 @@ acquire_err:
return -EINVAL;
}
-static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
- "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
-};
-
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
- char **extra_clks = NULL;
struct clk *clk;
int err, i;
- /* The main clock is stored in the first. */
- r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
- if (IS_ERR(r->clock[0])) {
- err = PTR_ERR(r->clock[0]);
- goto err_clock;
- }
-
- /* Get extra clocks */
- if (GPMI_IS_MX6(this))
- extra_clks = extra_clks_for_mx6q;
- if (!extra_clks)
- return 0;
-
- for (i = 1; i < GPMI_CLK_MAX; i++) {
- if (extra_clks[i - 1] == NULL)
- break;
-
- clk = devm_clk_get(this->dev, extra_clks[i - 1]);
+ for (i = 0; i < this->devdata->clks_count; i++) {
+ clk = devm_clk_get(this->dev, this->devdata->clks[i]);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
goto err_clock;
@@ -1929,12 +1936,6 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
return gpmi_alloc_dma_buffer(this);
}
-static void gpmi_nand_exit(struct gpmi_nand_data *this)
-{
- nand_release(nand_to_mtd(&this->nand));
- gpmi_free_dma_buffer(this);
-}
-
static int gpmi_init_last(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
@@ -2048,18 +2049,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
ret = nand_boot_init(this);
if (ret)
- goto err_out;
+ goto err_nand_cleanup;
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_out;
+ goto err_nand_cleanup;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
- goto err_out;
+ goto err_nand_cleanup;
return 0;
+err_nand_cleanup:
+ nand_cleanup(chip);
err_out:
- gpmi_nand_exit(this);
+ gpmi_free_dma_buffer(this);
return ret;
}
@@ -2076,6 +2079,9 @@ static const struct of_device_id gpmi_nand_id_table[] = {
}, {
.compatible = "fsl,imx6sx-gpmi-nand",
.data = &gpmi_devdata_imx6sx,
+ }, {
+ .compatible = "fsl,imx7d-gpmi-nand",
+ .data = &gpmi_devdata_imx7d,
}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -2129,7 +2135,8 @@ static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
- gpmi_nand_exit(this);
+ nand_release(nand_to_mtd(&this->nand));
+ gpmi_free_dma_buffer(this);
release_resources(this);
return 0;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 4e49a1f5fa27..9df0ad64e7e0 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -123,13 +123,16 @@ enum gpmi_type {
IS_MX23,
IS_MX28,
IS_MX6Q,
- IS_MX6SX
+ IS_MX6SX,
+ IS_MX7D,
};
struct gpmi_devdata {
enum gpmi_type type;
int bch_max_ecc_strength;
int max_chain_delay; /* See the async EDO mode */
+ const char * const *clks;
+ const int clks_count;
};
struct gpmi_nand_data {
@@ -231,7 +234,7 @@ struct gpmi_nfc_hardware_timing {
};
/**
- * struct timing_threshod - Timing threshold
+ * struct timing_threshold - Timing threshold
* @max_data_setup_cycles: The maximum number of data setup cycles that
* can be expressed in the hardware.
* @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
@@ -253,7 +256,7 @@ struct gpmi_nfc_hardware_timing {
* progress, this is the clock frequency during
* the most recent I/O transaction.
*/
-struct timing_threshod {
+struct timing_threshold {
const unsigned int max_chip_count;
const unsigned int max_data_setup_cycles;
const unsigned int internal_data_setup_in_ns;
@@ -305,6 +308,8 @@ void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
+#define GPMI_IS_MX7D(x) ((x)->devdata->type == IS_MX7D)
-#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
+#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
+ GPMI_IS_MX7D(x))
#endif
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index e40364eeb556..530caa80b1b6 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -764,6 +764,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
chip->write_buf = hisi_nfc_write_buf;
chip->read_buf = hisi_nfc_read_buf;
chip->chip_delay = HINFC504_CHIP_DELAY;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
hisi_nfc_host_init(host);
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index a39bb70175ee..8bc835f71b26 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -205,7 +205,7 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
return -EINVAL;
}
- mtd->ooblayout = &nand_ooblayout_lp_ops;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
return 0;
}
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 6d6eaed2d20c..0e86fb6277c3 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -708,6 +708,8 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->read_buf = mpc5121_nfc_read_buf;
chip->write_buf = mpc5121_nfc_write_buf;
chip->select_chip = mpc5121_nfc_select_chip;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index dbf256217b3e..6c3a4aab0b48 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -28,36 +28,16 @@
#define ECC_IDLE_MASK BIT(0)
#define ECC_IRQ_EN BIT(0)
+#define ECC_PG_IRQ_SEL BIT(1)
#define ECC_OP_ENABLE (1)
#define ECC_OP_DISABLE (0)
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
-#define ECC_CNFG_4BIT (0)
-#define ECC_CNFG_6BIT (1)
-#define ECC_CNFG_8BIT (2)
-#define ECC_CNFG_10BIT (3)
-#define ECC_CNFG_12BIT (4)
-#define ECC_CNFG_14BIT (5)
-#define ECC_CNFG_16BIT (6)
-#define ECC_CNFG_18BIT (7)
-#define ECC_CNFG_20BIT (8)
-#define ECC_CNFG_22BIT (9)
-#define ECC_CNFG_24BIT (0xa)
-#define ECC_CNFG_28BIT (0xb)
-#define ECC_CNFG_32BIT (0xc)
-#define ECC_CNFG_36BIT (0xd)
-#define ECC_CNFG_40BIT (0xe)
-#define ECC_CNFG_44BIT (0xf)
-#define ECC_CNFG_48BIT (0x10)
-#define ECC_CNFG_52BIT (0x11)
-#define ECC_CNFG_56BIT (0x12)
-#define ECC_CNFG_60BIT (0x13)
#define ECC_MODE_SHIFT (5)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
-#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32))
#define ECC_ENCIRQ_EN (0x80)
#define ECC_ENCIRQ_STA (0x84)
#define ECC_DECCON (0x100)
@@ -66,7 +46,6 @@
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
-#define ERR_MASK (0x3f)
#define ECC_DECDONE (0x124)
#define ECC_DECIRQ_EN (0x200)
#define ECC_DECIRQ_STA (0x204)
@@ -78,8 +57,17 @@
#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
+struct mtk_ecc_caps {
+ u32 err_mask;
+ const u8 *ecc_strength;
+ u8 num_ecc_strength;
+ u32 encode_parity_reg0;
+ int pg_irq_sel;
+};
+
struct mtk_ecc {
struct device *dev;
+ const struct mtk_ecc_caps *caps;
void __iomem *regs;
struct clk *clk;
@@ -87,7 +75,18 @@ struct mtk_ecc {
struct mutex lock;
u32 sectors;
- u8 eccdata[112];
+ u8 *eccdata;
+};
+
+/* ECC strengths that each IP supports */
+static const u8 ecc_strength_mt2701[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60
+};
+
+static const u8 ecc_strength_mt2712[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60, 68, 72, 80
};
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
@@ -136,77 +135,24 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
return IRQ_HANDLED;
}
-static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
- u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
- u32 reg;
-
- switch (config->strength) {
- case 4:
- ecc_bit = ECC_CNFG_4BIT;
- break;
- case 6:
- ecc_bit = ECC_CNFG_6BIT;
- break;
- case 8:
- ecc_bit = ECC_CNFG_8BIT;
- break;
- case 10:
- ecc_bit = ECC_CNFG_10BIT;
- break;
- case 12:
- ecc_bit = ECC_CNFG_12BIT;
- break;
- case 14:
- ecc_bit = ECC_CNFG_14BIT;
- break;
- case 16:
- ecc_bit = ECC_CNFG_16BIT;
- break;
- case 18:
- ecc_bit = ECC_CNFG_18BIT;
- break;
- case 20:
- ecc_bit = ECC_CNFG_20BIT;
- break;
- case 22:
- ecc_bit = ECC_CNFG_22BIT;
- break;
- case 24:
- ecc_bit = ECC_CNFG_24BIT;
- break;
- case 28:
- ecc_bit = ECC_CNFG_28BIT;
- break;
- case 32:
- ecc_bit = ECC_CNFG_32BIT;
- break;
- case 36:
- ecc_bit = ECC_CNFG_36BIT;
- break;
- case 40:
- ecc_bit = ECC_CNFG_40BIT;
- break;
- case 44:
- ecc_bit = ECC_CNFG_44BIT;
- break;
- case 48:
- ecc_bit = ECC_CNFG_48BIT;
- break;
- case 52:
- ecc_bit = ECC_CNFG_52BIT;
- break;
- case 56:
- ecc_bit = ECC_CNFG_56BIT;
- break;
- case 60:
- ecc_bit = ECC_CNFG_60BIT;
- break;
- default:
- dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
+ u32 ecc_bit, dec_sz, enc_sz;
+ u32 reg, i;
+
+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
+ if (ecc->caps->ecc_strength[i] == config->strength)
+ break;
+ }
+
+ if (i == ecc->caps->num_ecc_strength) {
+ dev_err(ecc->dev, "invalid ecc strength %d\n",
config->strength);
+ return -EINVAL;
}
+ ecc_bit = i;
+
if (config->op == ECC_ENCODE) {
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
@@ -232,6 +178,8 @@ static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
if (config->sectors)
ecc->sectors = 1 << (config->sectors - 1);
}
+
+ return 0;
}
void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
@@ -247,8 +195,8 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
offset = (i >> 2) << 2;
err = readl(ecc->regs + ECC_DECENUM0 + offset);
err = err >> ((i % 4) * 8);
- err &= ERR_MASK;
- if (err == ERR_MASK) {
+ err &= ecc->caps->err_mask;
+ if (err == ecc->caps->err_mask) {
/* uncorrectable errors */
stats->failed++;
continue;
@@ -313,6 +261,7 @@ EXPORT_SYMBOL(of_mtk_ecc_get);
int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
enum mtk_ecc_operation op = config->op;
+ u16 reg_val;
int ret;
ret = mutex_lock_interruptible(&ecc->lock);
@@ -322,11 +271,27 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
}
mtk_ecc_wait_idle(ecc, op);
- mtk_ecc_config(ecc, config);
- writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
- init_completion(&ecc->done);
- writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
+ ret = mtk_ecc_config(ecc, config);
+ if (ret) {
+ mutex_unlock(&ecc->lock);
+ return ret;
+ }
+
+ if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
+ init_completion(&ecc->done);
+ reg_val = ECC_IRQ_EN;
+ /*
+ * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, this chip
+ * can only generate one ecc irq per page read / write. If it
+ * is 0, one ecc irq is generated for each ecc step.
+ */
+ if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
+ reg_val |= ECC_PG_IRQ_SEL;
+ writew(reg_val, ecc->regs + ECC_IRQ_REG(op));
+ }
+
+ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
return 0;
}
@@ -396,7 +361,9 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
- __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4));
+ __ioread32_copy(ecc->eccdata,
+ ecc->regs + ecc->caps->encode_parity_reg0,
+ round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
memcpy(data + bytes, ecc->eccdata, len);
@@ -409,37 +376,79 @@ timeout:
}
EXPORT_SYMBOL(mtk_ecc_encode);
-void mtk_ecc_adjust_strength(u32 *p)
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
{
- u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
- 40, 44, 48, 52, 56, 60};
+ const u8 *ecc_strength = ecc->caps->ecc_strength;
int i;
- for (i = 0; i < ARRAY_SIZE(ecc); i++) {
- if (*p <= ecc[i]) {
+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
+ if (*p <= ecc_strength[i]) {
if (!i)
- *p = ecc[i];
- else if (*p != ecc[i])
- *p = ecc[i - 1];
+ *p = ecc_strength[i];
+ else if (*p != ecc_strength[i])
+ *p = ecc_strength[i - 1];
return;
}
}
- *p = ecc[ARRAY_SIZE(ecc) - 1];
+ *p = ecc_strength[ecc->caps->num_ecc_strength - 1];
}
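/*
 * Editor's worked example: for a requested strength of 17 on mt2701,
 * the first table entry >= 17 is 18, so *p is rounded down to 16, the
 * largest supported strength not above the request; a request below 4
 * is rounded up to the minimum, 4.
 */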
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
+ .err_mask = 0x3f,
+ .ecc_strength = ecc_strength_mt2701,
+ .num_ecc_strength = 20,
+ .encode_parity_reg0 = 0x10,
+ .pg_irq_sel = 0,
+};
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
+ .err_mask = 0x7f,
+ .ecc_strength = ecc_strength_mt2712,
+ .num_ecc_strength = 23,
+ .encode_parity_reg0 = 0x300,
+ .pg_irq_sel = 1,
+};
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt2701-ecc",
+ .data = &mtk_ecc_caps_mt2701,
+ }, {
+ .compatible = "mediatek,mt2712-ecc",
+ .data = &mtk_ecc_caps_mt2712,
+ },
+ {},
+};
+
static int mtk_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ecc *ecc;
struct resource *res;
+ const struct of_device_id *of_ecc_id = NULL;
+ u32 max_eccdata_size;
int irq, ret;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc)
return -ENOMEM;
+ of_ecc_id = of_match_device(mtk_ecc_dt_match, &pdev->dev);
+ if (!of_ecc_id)
+ return -ENODEV;
+
+ ecc->caps = of_ecc_id->data;
+
+ max_eccdata_size = ecc->caps->num_ecc_strength - 1;
+ max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
+ max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3;
+ max_eccdata_size = round_up(max_eccdata_size, 4);
+ ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
+ if (!ecc->eccdata)
+ return -ENOMEM;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ecc->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ecc->regs)) {
@@ -500,19 +509,12 @@ static int mtk_ecc_resume(struct device *dev)
return ret;
}
- mtk_ecc_hw_init(ecc);
-
return 0;
}
static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
#endif
-static const struct of_device_id mtk_ecc_dt_match[] = {
- { .compatible = "mediatek,mt2701-ecc" },
- {},
-};
-
MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
static struct platform_driver mtk_ecc_driver = {
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index cbeba5cd1c13..d245c14f1b80 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -42,7 +42,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
void mtk_ecc_disable(struct mtk_ecc *);
-void mtk_ecc_adjust_strength(u32 *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 6c517c682939..f7ae99464375 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include "mtk_ecc.h"
/* NAND controller register definition */
@@ -38,23 +39,6 @@
#define NFI_PAGEFMT (0x04)
#define PAGEFMT_FDM_ECC_SHIFT (12)
#define PAGEFMT_FDM_SHIFT (8)
-#define PAGEFMT_SPARE_16 (0)
-#define PAGEFMT_SPARE_26 (1)
-#define PAGEFMT_SPARE_27 (2)
-#define PAGEFMT_SPARE_28 (3)
-#define PAGEFMT_SPARE_32 (4)
-#define PAGEFMT_SPARE_36 (5)
-#define PAGEFMT_SPARE_40 (6)
-#define PAGEFMT_SPARE_44 (7)
-#define PAGEFMT_SPARE_48 (8)
-#define PAGEFMT_SPARE_49 (9)
-#define PAGEFMT_SPARE_50 (0xa)
-#define PAGEFMT_SPARE_51 (0xb)
-#define PAGEFMT_SPARE_52 (0xc)
-#define PAGEFMT_SPARE_62 (0xd)
-#define PAGEFMT_SPARE_63 (0xe)
-#define PAGEFMT_SPARE_64 (0xf)
-#define PAGEFMT_SPARE_SHIFT (4)
#define PAGEFMT_SEC_SEL_512 BIT(2)
#define PAGEFMT_512_2K (0)
#define PAGEFMT_2K_4K (1)
@@ -115,6 +99,17 @@
#define MTK_RESET_TIMEOUT (1000000)
#define MTK_MAX_SECTOR (16)
#define MTK_NAND_MAX_NSELS (2)
+#define MTK_NFC_MIN_SPARE (16)
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
+ ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
+ (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
+
+struct mtk_nfc_caps {
+ const u8 *spare_size;
+ u8 num_spare_size;
+ u8 pageformat_spare_shift;
+ u8 nfi_clk_div;
+};
struct mtk_nfc_bad_mark_ctl {
void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
@@ -155,6 +150,7 @@ struct mtk_nfc {
struct mtk_ecc *ecc;
struct device *dev;
+ const struct mtk_nfc_caps *caps;
void __iomem *regs;
struct completion done;
@@ -163,6 +159,20 @@ struct mtk_nfc {
u8 *buffer;
};
+/*
+ * Supported spare sizes for each IP.
+ * The order must match the spare size bitfield definition of the
+ * NFI_PAGEFMT register.
+ */
+static const u8 spare_size_mt2701[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
+};
+
+static const u8 spare_size_mt2712[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
+ 74
+};
+
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -308,7 +318,7 @@ static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
- u32 fmt, spare;
+ u32 fmt, spare, i;
if (!mtd->writesize)
return 0;
@@ -352,63 +362,21 @@ static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
if (chip->ecc.size == 1024)
spare >>= 1;
- switch (spare) {
- case 16:
- fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
- break;
- case 26:
- fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
- break;
- case 27:
- fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
- break;
- case 28:
- fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
- break;
- case 32:
- fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
- break;
- case 36:
- fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
- break;
- case 40:
- fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
- break;
- case 44:
- fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
- break;
- case 48:
- fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
- break;
- case 49:
- fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
- break;
- case 50:
- fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
- break;
- case 51:
- fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
- break;
- case 52:
- fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
- break;
- case 62:
- fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
- break;
- case 63:
- fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
- break;
- case 64:
- fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
- break;
- default:
- dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
+ for (i = 0; i < nfc->caps->num_spare_size; i++) {
+ if (nfc->caps->spare_size[i] == spare)
+ break;
+ }
+
+ if (i == nfc->caps->num_spare_size) {
+ dev_err(nfc->dev, "invalid spare size %d\n", spare);
return -EINVAL;
}
+ fmt |= i << nfc->caps->pageformat_spare_shift;
+
fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
- nfi_writew(nfc, fmt, NFI_PAGEFMT);
+ nfi_writel(nfc, fmt, NFI_PAGEFMT);
nfc->ecc_cfg.strength = chip->ecc.strength;
nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
@@ -531,6 +499,74 @@ static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
mtk_nfc_write_byte(mtd, buf[i]);
}
+static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ const struct nand_sdr_timings *timings;
+ u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ rate = clk_get_rate(nfc->clk.nfi_clk);
+ /* There is a frequency divider in some IPs */
+ rate /= nfc->caps->nfi_clk_div;
+
+ /* turn clock rate into kHz */
+ rate /= 1000;
+
+ tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
+ tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
+ tpoecs &= 0xf;
+
+ tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
+ tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
+ tprecs &= 0x3f;
+
+ /* the SDR interface has no tCR, the CE# low to RE# low time */
+ tc2r = 0;
+
+ tw2r = timings->tWHR_min / 1000;
+ tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
+ tw2r = DIV_ROUND_UP(tw2r - 1, 2);
+ tw2r &= 0xf;
+
+ twh = max(timings->tREH_min, timings->tWH_min) / 1000;
+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
+ twh &= 0xf;
+
+ twst = timings->tWP_min / 1000;
+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
+ twst &= 0xf;
+
+ trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
+ trlt &= 0xf;
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: tpoecs, minimum required time for CS post pulling down after
+ * accessing the device
+ * 27:22: tprecs, minimum required time for CS pre pulling down before
+ * accessing the device
+ * 21:16: tc2r, minimum required time from NCEB low to NREB low
+ * 15:12: tw2r, minimum required time from NWEB high to NREB low.
+ * 11:08: twh, write enable hold time
+ * 07:04: twst, write wait states
+ * 03:00: trlt, read wait states
+ */
+ trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
+ nfi_writel(nfc, trlt, NFI_ACCCON);
+
+ return 0;
+}
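/*
 * Editor's worked example: the fixed ACCON value 0x10804211 that this
 * function replaces (dropped from mtk_nfc_hw_init() below) decodes, per
 * the field layout above, to
 *   ACCTIMING(tpoecs=1, tprecs=2, tc2r=0, tw2r=4, twh=2, twst=1, trlt=1)
 *   = 1 << 28 | 2 << 22 | 0 << 16 | 4 << 12 | 2 << 8 | 1 << 4 | 1
 *   = 0x10804211
 * so the computed path can reproduce the old default exactly.
 */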
+
static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
@@ -988,28 +1024,13 @@ static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
/*
- * ACCON: access timing control register
- * -------------------------------------
- * 31:28: minimum required time for CS post pulling down after accessing
- * the device
- * 27:22: minimum required time for CS pre pulling down before accessing
- * the device
- * 21:16: minimum required time from NCEB low to NREB low
- * 15:12: minimum required time from NWEB high to NREB low.
- * 11:08: write enable hold time
- * 07:04: write wait states
- * 03:00: read wait states
- */
- nfi_writel(nfc, 0x10804211, NFI_ACCCON);
-
- /*
* CNRNB: nand ready/busy register
* -------------------------------
* 7:4: timeout register for polling the NAND busy/ready signal
* 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
*/
nfi_writew(nfc, 0xf1, NFI_CNRNB);
- nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+ nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
mtk_nfc_hw_reset(nfc);
@@ -1131,12 +1152,12 @@ static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
}
}
-static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
- u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
- 48, 49, 50, 51, 52, 62, 63, 64};
- u32 eccsteps, i;
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ const u8 *spare = nfc->caps->spare_size;
+ u32 eccsteps, i, closest_spare = 0;
eccsteps = mtd->writesize / nand->ecc.size;
*sps = mtd->oobsize / eccsteps;
@@ -1144,28 +1165,31 @@ static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
if (nand->ecc.size == 1024)
*sps >>= 1;
- for (i = 0; i < ARRAY_SIZE(spare); i++) {
- if (*sps <= spare[i]) {
- if (!i)
- *sps = spare[i];
- else if (*sps != spare[i])
- *sps = spare[i - 1];
- break;
+ if (*sps < MTK_NFC_MIN_SPARE)
+ return -EINVAL;
+
+ for (i = 0; i < nfc->caps->num_spare_size; i++) {
+ if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
+ closest_spare = i;
+ if (*sps == spare[i])
+ break;
}
}
- if (i >= ARRAY_SIZE(spare))
- *sps = spare[ARRAY_SIZE(spare) - 1];
+ *sps = spare[closest_spare];
if (nand->ecc.size == 1024)
*sps <<= 1;
+
+ return 0;
}
static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 spare;
- int free;
+ int free, ret;
/* support only ecc hw mode */
if (nand->ecc.mode != NAND_ECC_HW) {
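The spare-size selection in mtk_nfc_set_spare_per_sector() above picks the largest supported spare size that does not exceed the chip's actual spare-per-sector. In isolation (illustration only; spare_size_mt2701 is declared elsewhere in this patch and mirrors the array removed above):

#include <stdio.h>

static const unsigned char spare_size_mt2701[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
};

static unsigned int closest_spare(unsigned int want)
{
	unsigned int i, best = 0;

	for (i = 0; i < sizeof(spare_size_mt2701); i++)
		if (want >= spare_size_mt2701[i] &&
		    spare_size_mt2701[i] >= spare_size_mt2701[best])
			best = i;

	return spare_size_mt2701[best];
}

int main(void)
{
	printf("%u\n", closest_spare(61));	/* prints 52 */
	return 0;
}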
@@ -1194,7 +1218,9 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
nand->ecc.size = 1024;
}
- mtk_nfc_set_spare_per_sector(&spare, mtd);
+ ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
+ if (ret)
+ return ret;
/* calculate oob bytes except ecc parity data */
free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
@@ -1214,7 +1240,7 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
}
}
- mtk_ecc_adjust_strength(&nand->ecc.strength);
+ mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);
dev_info(dev, "eccsize %d eccstrength %d\n",
nand->ecc.size, nand->ecc.strength);
@@ -1271,6 +1297,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
nand->read_byte = mtk_nfc_read_byte;
nand->read_buf = mtk_nfc_read_buf;
nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+ nand->setup_data_interface = mtk_nfc_setup_data_interface;
/* set default mode in case dt entry is missing */
nand->ecc.mode = NAND_ECC_HW;
@@ -1312,7 +1339,10 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return -EINVAL;
}
- mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+ ret = mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+ if (ret)
+ return ret;
+
mtk_nfc_set_fdm(&chip->fdm, mtd);
mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
@@ -1354,12 +1384,39 @@ static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
return 0;
}
+static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
+ .spare_size = spare_size_mt2701,
+ .num_spare_size = 16,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
+ .spare_size = spare_size_mt2712,
+ .num_spare_size = 19,
+ .pageformat_spare_shift = 16,
+ .nfi_clk_div = 2,
+};
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+ {
+ .compatible = "mediatek,mt2701-nfc",
+ .data = &mtk_nfc_caps_mt2701,
+ }, {
+ .compatible = "mediatek,mt2712-nfc",
+ .data = &mtk_nfc_caps_mt2712,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
static int mtk_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk_nfc *nfc;
struct resource *res;
+ const struct of_device_id *of_nfc_id = NULL;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
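Side note on the nfi_writew() to nfi_writel() conversions earlier in this file: mt2712 uses pageformat_spare_shift = 16, which places the spare-size index beyond what a 16-bit register access can carry, so NFI_PAGEFMT is now written as a 32-bit register.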
@@ -1423,6 +1480,14 @@ static int mtk_nfc_probe(struct platform_device *pdev)
goto clk_disable;
}
+ of_nfc_id = of_match_device(mtk_nfc_id_table, &pdev->dev);
+ if (!of_nfc_id) {
+ ret = -ENODEV;
+ goto clk_disable;
+ }
+
+ nfc->caps = of_nfc_id->data;
+
platform_set_drvdata(pdev, nfc);
ret = mtk_nfc_nand_chips_init(dev, nfc);
@@ -1485,8 +1550,6 @@ static int mtk_nfc_resume(struct device *dev)
if (ret)
return ret;
- mtk_nfc_hw_init(nfc);
-
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
@@ -1503,12 +1566,6 @@ static int mtk_nfc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif
-static const struct of_device_id mtk_nfc_id_table[] = {
- { .compatible = "mediatek,mt2701-nfc" },
- {}
-};
-MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
-
static struct platform_driver mtk_nfc_driver = {
.probe = mtk_nfc_probe,
.remove = mtk_nfc_remove,
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 61ca020c5272..a764d5ca7536 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -152,9 +152,8 @@ struct mxc_nand_devtype_data {
void (*select_chip)(struct mtd_info *mtd, int chip);
int (*correct_data)(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc);
- int (*setup_data_interface)(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only);
+ int (*setup_data_interface)(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf);
/*
* On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
@@ -1015,9 +1014,8 @@ static void preset_v1(struct mtd_info *mtd)
writew(0x4, NFC_V1_V2_WRPROT);
}
-static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only)
+static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
@@ -1075,7 +1073,7 @@ static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd,
return -EINVAL;
}
- if (check_only)
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
ret = clk_set_rate(host->clk, rate);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index bf8486c406d3..5fa5ddc94834 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -755,6 +755,16 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
return;
/* This applies to read commands */
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that we should not wait for the
+ * device to be ready.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
default:
/*
* If we don't have access to the busy pin, we apply the given
@@ -889,6 +899,15 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
return;
case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that READSTART should not be
+ * issued.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
@@ -1044,12 +1063,13 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
/**
* nand_reset_data_interface - Reset data interface and timings
* @chip: The NAND chip
+ * @chipnr: Internal die id
*
* Reset the Data interface and timings to ONFI mode 0.
*
* Returns 0 for success or negative error code otherwise.
*/
-static int nand_reset_data_interface(struct nand_chip *chip)
+static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_data_interface *conf;
@@ -1073,7 +1093,7 @@ static int nand_reset_data_interface(struct nand_chip *chip)
*/
conf = nand_get_default_data_interface();
- ret = chip->setup_data_interface(mtd, conf, false);
+ ret = chip->setup_data_interface(mtd, chipnr, conf);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1083,6 +1103,7 @@ static int nand_reset_data_interface(struct nand_chip *chip)
/**
* nand_setup_data_interface - Setup the best data interface and timings
* @chip: The NAND chip
+ * @chipnr: Internal die id
*
* Find and configure the best data interface and NAND timings supported by
* the chip and the driver.
@@ -1092,7 +1113,7 @@ static int nand_reset_data_interface(struct nand_chip *chip)
*
* Returns 0 for success or negative error code otherwise.
*/
-static int nand_setup_data_interface(struct nand_chip *chip)
+static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
@@ -1116,7 +1137,7 @@ static int nand_setup_data_interface(struct nand_chip *chip)
goto err;
}
- ret = chip->setup_data_interface(mtd, chip->data_interface, false);
+ ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
err:
return ret;
}
@@ -1167,8 +1188,10 @@ static int nand_init_data_interface(struct nand_chip *chip)
if (ret)
continue;
- ret = chip->setup_data_interface(mtd, chip->data_interface,
- true);
+ /* Pass NAND_DATA_IFACE_CHECK_ONLY to only check the timings */
+ ret = chip->setup_data_interface(mtd,
+ NAND_DATA_IFACE_CHECK_ONLY,
+ chip->data_interface);
if (!ret) {
chip->onfi_timing_mode_default = mode;
break;
@@ -1195,7 +1218,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- ret = nand_reset_data_interface(chip);
+ ret = nand_reset_data_interface(chip, chipnr);
if (ret)
return ret;
@@ -1208,7 +1231,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
- ret = nand_setup_data_interface(chip);
+ ret = nand_setup_data_interface(chip, chipnr);
chip->select_chip(mtd, -1);
if (ret)
return ret;
@@ -1424,7 +1447,10 @@ static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
for (; len >= sizeof(long);
len -= sizeof(long), bitmap += sizeof(long)) {
- weight = hweight_long(*((unsigned long *)bitmap));
+ unsigned long d = *((unsigned long *)bitmap);
+ if (d == ~0UL)
+ continue;
+ weight = hweight_long(d);
bitflips += BITS_PER_LONG - weight;
if (unlikely(bitflips > bitflips_threshold))
return -EBADMSG;
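What the added fast path buys (illustration only, host-side C): an erased NAND word reads back all ones, so every zero bit is a bitflip, and fully-erased words can now be skipped without a popcount. Here __builtin_popcountl stands in for the kernel's hweight_long().

#include <stdio.h>

int main(void)
{
	unsigned long w = ~0UL ^ (1UL << 3);	/* one bit flipped to 0 */
	int bits = 8 * (int)sizeof(unsigned long);

	if (w != ~0UL)	/* the new early-out skips fully-erased words */
		printf("bitflips: %d\n", bits - __builtin_popcountl(w));	/* 1 */
	return 0;
}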
@@ -1527,14 +1553,15 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
chip->read_buf(mtd, buf, mtd->writesize);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
+EXPORT_SYMBOL(nand_read_page_raw);
/**
* nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
@@ -2472,8 +2499,8 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
{
chip->write_buf(mtd, buf, mtd->writesize);
if (oob_required)
@@ -2481,6 +2508,7 @@ static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
+EXPORT_SYMBOL(nand_write_page_raw);
/**
* nand_write_page_raw_syndrome - [INTERN] raw page write function
@@ -2718,7 +2746,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
+ int oob_required, int page, int raw)
{
int status, subpage;
@@ -2744,30 +2772,12 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (status < 0)
return status;
- /*
- * Cached progamming disabled for now. Not sure if it's worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
- */
- cached = 0;
+ if (nand_standard_page_accessors(&chip->ecc)) {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- if (!cached || !NAND_HAS_CACHEPROG(chip)) {
-
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
- /*
- * See if operation failed and additional status checks are
- * available.
- */
- if ((status & NAND_STATUS_FAIL) && (chip->errstat))
- status = chip->errstat(mtd, chip, FL_WRITING, status,
- page);
-
if (status & NAND_STATUS_FAIL)
return -EIO;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
}
return 0;
@@ -2875,7 +2885,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
while (1) {
int bytes = mtd->writesize;
- int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
int use_bufpoi;
int part_pagewr = (column || writelen < mtd->writesize);
@@ -2893,7 +2902,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (use_bufpoi) {
pr_debug("%s: using write bounce buffer for buf@%p\n",
__func__, buf);
- cached = 0;
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
@@ -2912,7 +2920,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
}
ret = nand_write_page(mtd, chip, column, bytes, wbuf,
- oob_required, page, cached,
+ oob_required, page,
(ops->mode == MTD_OPS_RAW));
if (ret)
break;
@@ -3228,14 +3236,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->erase(mtd, page & chip->pagemask);
- /*
- * See if operation failed and additional status checks are
- * available
- */
- if ((status & NAND_STATUS_FAIL) && (chip->errstat))
- status = chip->errstat(mtd, chip, FL_ERASING,
- status, page);
-
/* See if block erase succeeded */
if (status & NAND_STATUS_FAIL) {
pr_debug("%s: failed erase, page 0x%08x\n",
@@ -3422,6 +3422,25 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * nand_onfi_get_set_features_notsupp - set/get features stub returning
+ * -ENOTSUPP
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ *
+ * Should be used by NAND controller drivers that do not support the SET/GET
+ * FEATURES operations.
+ */
+int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
+ struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
+
+/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
*/
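Usage note on the stub above: controllers whose custom cmdfunc cannot issue SET/GET FEATURES (pxa3xx, qcom, sh_flctl and vf610 later in this series) wire both chip->onfi_set_features and chip->onfi_get_features to it, so feature-dependent probing such as the Micron on-die ECC detection below fails cleanly with -ENOTSUPP instead of sending undefined commands to the chip.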
@@ -4180,6 +4199,7 @@ static const char * const nand_ecc_modes[] = {
[NAND_ECC_HW] = "hw",
[NAND_ECC_HW_SYNDROME] = "hw_syndrome",
[NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
+ [NAND_ECC_ON_DIE] = "on-die",
};
static int of_get_nand_ecc_mode(struct device_node *np)
@@ -4374,7 +4394,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
* For the other dies, nand_reset() will automatically switch to the
* best mode for us.
*/
- ret = nand_setup_data_interface(chip);
+ ret = nand_setup_data_interface(chip, 0);
if (ret)
goto err_nand_init;
@@ -4512,6 +4532,226 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
}
}
+/**
+ * nand_check_ecc_caps - check the sanity of preset ECC settings
+ * @chip: nand chip info structure
+ * @caps: ECC caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * When ECC step size and strength are already set, check if they are supported
+ * by the controller and the calculated ECC bytes fit within the chip's OOB.
+ * On success, the calculated ECC bytes is set.
+ */
+int nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int preset_step = chip->ecc.size;
+ int preset_strength = chip->ecc.strength;
+ int nsteps, ecc_bytes;
+ int i, j;
+
+ if (WARN_ON(oobavail < 0))
+ return -EINVAL;
+
+ if (!preset_step || !preset_strength)
+ return -ENODATA;
+
+ nsteps = mtd->writesize / preset_step;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+
+ if (stepinfo->stepsize != preset_step)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ if (stepinfo->strengths[j] != preset_strength)
+ continue;
+
+ ecc_bytes = caps->calc_ecc_bytes(preset_step,
+ preset_strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ return ecc_bytes;
+
+ if (ecc_bytes * nsteps > oobavail) {
+ pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
+ preset_step, preset_strength);
+ return -ENOSPC;
+ }
+
+ chip->ecc.bytes = ecc_bytes;
+
+ return 0;
+ }
+ }
+
+ pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
+ preset_step, preset_strength);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
+
+/**
+ * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * If a chip's ECC requirement is provided, try to meet it with the least
+ * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
+ * On success, the chosen ECC settings are set.
+ */
+int nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int req_step = chip->ecc_step_ds;
+ int req_strength = chip->ecc_strength_ds;
+ int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
+ int best_step, best_strength, best_ecc_bytes;
+ int best_ecc_bytes_total = INT_MAX;
+ int i, j;
+
+ if (WARN_ON(oobavail < 0))
+ return -EINVAL;
+
+ /* No information provided by the NAND chip */
+ if (!req_step || !req_strength)
+ return -ENOTSUPP;
+
+ /* number of correctable bits the chip requires in a page */
+ req_corr = mtd->writesize / req_step * req_strength;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ /*
+ * If both step size and strength are smaller than the
+ * chip's requirement, it is not easy to compare the
+ * resulting reliability.
+ */
+ if (step_size < req_step && strength < req_strength)
+ continue;
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+ ecc_bytes_total = ecc_bytes * nsteps;
+
+ if (ecc_bytes_total > oobavail ||
+ strength * nsteps < req_corr)
+ continue;
+
+ /*
+ * We assume the best is to meet the chip's requirement
+ * with the least number of ECC bytes.
+ */
+ if (ecc_bytes_total < best_ecc_bytes_total) {
+ best_ecc_bytes_total = ecc_bytes_total;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (best_ecc_bytes_total == INT_MAX)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_match_ecc_req);
+
+/**
+ * nand_maximize_ecc - choose the max ECC strength available
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the max ECC strength that is supported on the controller, and can fit
+ * within the chip's OOB. On success, the chosen ECC settings are set.
+ */
+int nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int step_size, strength, nsteps, ecc_bytes, corr;
+ int best_corr = 0;
+ int best_step = 0;
+ int best_strength, best_ecc_bytes;
+ int i, j;
+
+ if (WARN_ON(oobavail < 0))
+ return -EINVAL;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ /* If chip->ecc.size is already set, respect it */
+ if (chip->ecc.size && step_size != chip->ecc.size)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+
+ if (ecc_bytes * nsteps > oobavail)
+ continue;
+
+ corr = strength * nsteps;
+
+ /*
+ * If the number of correctable bits is the same,
+ * bigger step_size has more reliability.
+ */
+ if (corr > best_corr ||
+ (corr == best_corr && step_size > best_step)) {
+ best_corr = corr;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (!best_corr)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_maximize_ecc);
+
/*
* Check if the chip configuration meet the datasheet requirements.
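Putting the three helpers together, a controller driver's ECC setup might look like the sketch below. The caps table, the strengths and my_calc_ecc_bytes() are invented for illustration; only the struct fields and helper signatures come from this patch.

#include <linux/kernel.h>
#include <linux/mtd/nand.h>

static int my_calc_ecc_bytes(int step_size, int strength)
{
	/* invented rule of thumb: ~13 parity bits per corrected bit */
	return DIV_ROUND_UP(strength * 13, 8);
}

static const int my_strengths[] = { 4, 8, 16 };

static const struct nand_ecc_step_info my_stepinfo = {
	.stepsize	= 512,
	.strengths	= my_strengths,
	.nstrengths	= ARRAY_SIZE(my_strengths),
};

static const struct nand_ecc_caps my_caps = {
	.stepinfos	= &my_stepinfo,
	.nstepinfos	= 1,
	.calc_ecc_bytes	= my_calc_ecc_bytes,
};

static int my_ecc_init(struct nand_chip *chip, int oobavail)
{
	/* honour a preset step/strength (e.g. from DT), if any ... */
	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, &my_caps, oobavail);

	/* ... else meet the chip's requirement with the fewest ECC bytes ... */
	if (!nand_match_ecc_req(chip, &my_caps, oobavail))
		return 0;

	/* ... and fall back to the strongest configuration that fits */
	return nand_maximize_ecc(chip, &my_caps, oobavail);
}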
@@ -4733,6 +4973,18 @@ int nand_scan_tail(struct mtd_info *mtd)
}
break;
+ case NAND_ECC_ON_DIE:
+ if (!ecc->read_page || !ecc->write_page) {
+ WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ break;
+
case NAND_ECC_NONE:
pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
ecc->read_page = nand_read_page_raw;
@@ -4773,6 +5025,11 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_free;
}
ecc->total = ecc->steps * ecc->bytes;
+ if (ecc->total > mtd->oobsize) {
+ WARN(1, "Total number of ECC bytes exceeded oobsize\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
/*
* The number of bytes available for a client to place data into
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index 877011069251..c30ab60f8e1b 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -17,6 +17,12 @@
#include <linux/mtd/nand.h>
+/*
+ * Special Micron status bit that indicates when the block has been
+ * corrected by on-die ECC and should be rewritten
+ */
+#define NAND_STATUS_WRITE_RECOMMENDED BIT(3)
+
struct nand_onfi_vendor_micron {
u8 two_plane_read;
u8 read_cache;
@@ -66,9 +72,197 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
return 0;
}
+static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_ooblayout_ops = {
+ .ecc = micron_nand_on_die_ooblayout_ecc,
+ .free = micron_nand_on_die_ooblayout_free,
+};
+
+static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+
+ if (enable)
+ feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
+
+ return chip->onfi_set_features(nand_to_mtd(chip), chip,
+ ONFI_FEATURE_ON_DIE_ECC, feature);
+}
+
+static int
+micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ int status;
+ int max_bitflips = 0;
+
+ micron_nand_on_die_ecc_setup(chip, true);
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = chip->read_byte(mtd);
+ if (status & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+ /*
+ * The internal ECC doesn't tell us the number of bitflips
+ * that have been corrected, but tells us if it recommends
+ * rewriting the block. If that's the case, we pretend we had
+ * a number of bitflips equal to the ECC strength, which will
+ * hint the NAND core to rewrite the block.
+ */
+ else if (status & NAND_STATUS_WRITE_RECOMMENDED)
+ max_bitflips = chip->ecc.strength;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ nand_read_page_raw(mtd, chip, buf, oob_required, page);
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return max_bitflips;
+}
+
+static int
+micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int status;
+
+ micron_nand_on_die_ecc_setup(chip, true);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ nand_write_page_raw(mtd, chip, buf, oob_required, page);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int
+micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ nand_read_page_raw(mtd, chip, buf, oob_required, page);
+
+ return 0;
+}
+
+static int
+micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ nand_write_page_raw(mtd, chip, buf, oob_required, page);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+enum {
+ /* The NAND flash doesn't support on-die ECC */
+ MICRON_ON_DIE_UNSUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC and it can be
+ * enabled/disabled by a set features command.
+ */
+ MICRON_ON_DIE_SUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC, and it cannot be
+ * disabled.
+ */
+ MICRON_ON_DIE_MANDATORY,
+};
+
+/*
+ * Try to detect if the NAND supports on-die ECC. To do this, we enable
+ * the feature, and read back if it has been enabled as expected. We
+ * also check if it can be disabled, because some Micron NANDs do not
+ * allow disabling the on-die ECC and we don't support such NANDs for
+ * now.
+ *
+ * This function also has the side effect of disabling on-die ECC if
+ * it had been left enabled by the firmware/bootloader.
+ */
+static int micron_supports_on_die_ecc(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ int ret;
+
+ if (chip->onfi_version == 0)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (chip->bits_per_cell != 1)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ chip->onfi_get_features(nand_to_mtd(chip), chip,
+ ONFI_FEATURE_ON_DIE_ECC, feature);
+ if ((feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) == 0)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = micron_nand_on_die_ecc_setup(chip, false);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ chip->onfi_get_features(nand_to_mtd(chip), chip,
+ ONFI_FEATURE_ON_DIE_ECC, feature);
+ if (feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN)
+ return MICRON_ON_DIE_MANDATORY;
+
+ /*
+ * Some Micron NANDs have an on-die ECC of 4/512, others of
+ * 8/512. We only support the former.
+ */
+ if (chip->onfi_params.ecc_bits != 4)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ return MICRON_ON_DIE_SUPPORTED;
+}
+
static int micron_nand_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ int ondie;
int ret;
ret = micron_nand_onfi_init(chip);
@@ -78,6 +272,34 @@ static int micron_nand_init(struct nand_chip *chip)
if (mtd->writesize == 2048)
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ ondie = micron_supports_on_die_ecc(chip);
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ pr_err("On-die ECC forcefully enabled, not supported\n");
+ return -EINVAL;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_ON_DIE) {
+ if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
+ pr_err("On-die ECC selected but not supported\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
+ chip->ecc.bytes = 8;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 4;
+ chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
+ chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
+ chip->ecc.read_page_raw =
+ micron_nand_read_page_raw_on_die_ecc;
+ chip->ecc.write_page_raw =
+ micron_nand_write_page_raw_on_die_ecc;
+
+ mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
+ }
+
return 0;
}
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index f8e463a97b9e..209170ed2b76 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -166,7 +166,11 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
}
- clk_prepare_enable(info->clk);
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare clock!\n");
+ return ret;
+ }
ret = nand_scan(mtd, 1);
if (ret)
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 649ba8200832..74dae4bbdac8 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1812,6 +1812,8 @@ static int alloc_nand_resource(struct platform_device *pdev)
chip->write_buf = pxa3xx_nand_write_buf;
chip->options |= NAND_NO_SUBPAGE_WRITE;
chip->cmdfunc = nand_cmdfunc;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
}
nand_hw_control_init(chip->controller);
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 57d483ac5765..88af7145a51a 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -2008,6 +2008,8 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
chip->read_byte = qcom_nandc_read_byte;
chip->read_buf = qcom_nandc_read_buf;
chip->write_buf = qcom_nandc_write_buf;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
/*
* the bad block marker is readable only when we read the last codeword
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index f0b030d44f71..9e0c849607b9 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -812,9 +812,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
return -ENODEV;
}
-static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only)
+static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
struct s3c2410_platform_nand *pdata = info->platform;
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 442ce619b3b6..891ac7b99305 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1183,6 +1183,8 @@ static int flctl_probe(struct platform_device *pdev)
nand->read_buf = flctl_read_buf;
nand->select_chip = flctl_select_chip;
nand->cmdfunc = flctl_cmdfunc;
+ nand->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ nand->onfi_get_features = nand_onfi_get_set_features_notsupp;
if (pdata->flcmncr_val & SEL_16BIT)
nand->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 118a26fff368..d0b6f8f9f297 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1301,7 +1301,6 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_enable(mtd);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
int data_off = i * ecc->size;
@@ -1592,9 +1591,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
#define sunxi_nand_lookup_timing(l, p, c) \
_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
-static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only)
+static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
@@ -1707,7 +1705,7 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
return tRHW;
}
- if (check_only)
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
/*
@@ -1922,7 +1920,6 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
ecc->read_oob_raw = nand_read_oob_std;
ecc->write_oob_raw = nand_write_oob_std;
- ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
return 0;
}
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 49b286c6c10f..9d40b793b1c4 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -303,7 +303,7 @@ static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
struct tango_nfc *nfc = to_tango_nfc(chip->controller);
- int err, len = mtd->writesize;
+ int err, status, len = mtd->writesize;
/* Calling tango_write_oob() would send PAGEPROG twice */
if (oob_required)
@@ -314,6 +314,10 @@ static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (err)
return err;
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
@@ -340,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, *pos, -1);
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1);
} else {
tango_write_buf(mtd, *buf, len);
*buf += len;
@@ -431,9 +435,16 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
+ int status;
+
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
raw_write(chip, buf, chip->oob_poi);
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
@@ -484,9 +495,8 @@ static u32 to_ticks(int kHz, int ps)
return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
}
-static int tango_set_timings(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only)
+static int tango_set_timings(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
{
const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
struct nand_chip *chip = mtd_to_nand(mtd);
@@ -498,7 +508,7 @@ static int tango_set_timings(struct mtd_info *mtd,
if (IS_ERR(sdr))
return PTR_ERR(sdr);
- if (check_only)
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max);
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 3ea4bb19e12d..744ab10e8962 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -703,6 +703,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
chip->read_buf = vf610_nfc_read_buf;
chip->write_buf = vf610_nfc_write_buf;
chip->select_chip = vf610_nfc_select_chip;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
chip->options |= NAND_NO_SUBPAGE_WRITE;
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
new file mode 100644
index 000000000000..d206b3c533bc
--- /dev/null
+++ b/drivers/mtd/parsers/Kconfig
@@ -0,0 +1,8 @@
+config MTD_PARSER_TRX
+ tristate "Parser for TRX format partitions"
+ depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST)
+ help
+ TRX is a firmware format used by Broadcom on their devices. It
+ may contain up to 3 or 4 partitions (depending on the version).
+ This driver will parse the TRX header and report at least two partitions:
+ kernel and rootfs.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
new file mode 100644
index 000000000000..4d9024e0be3b
--- /dev/null
+++ b/drivers/mtd/parsers/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c
new file mode 100644
index 000000000000..df360a75e1eb
--- /dev/null
+++ b/drivers/mtd/parsers/parser_trx.c
@@ -0,0 +1,126 @@
+/*
+ * Parser for TRX format partitions
+ *
+ * Copyright (C) 2012 - 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#define TRX_PARSER_MAX_PARTS 4
+
+/* Magics */
+#define TRX_MAGIC 0x30524448
+#define UBI_EC_MAGIC 0x23494255 /* UBI# */
+
+struct trx_header {
+ uint32_t magic;
+ uint32_t length;
+ uint32_t crc32;
+ uint16_t flags;
+ uint16_t version;
+ uint32_t offset[3];
+} __packed;
+
+static const char *parser_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+{
+ uint32_t buf;
+ size_t bytes_read;
+ int err;
+
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%zX): %d\n",
+ offset, err);
+ goto out_default;
+ }
+
+ if (buf == UBI_EC_MAGIC)
+ return "ubi";
+
+out_default:
+ return "rootfs";
+}
+
+static int parser_trx_parse(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ struct mtd_partition *part;
+ struct trx_header trx;
+ size_t bytes_read;
+ uint8_t curr_part = 0, i = 0;
+ int err;
+
+ parts = kzalloc(sizeof(struct mtd_partition) * TRX_PARSER_MAX_PARTS,
+ GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ err = mtd_read(mtd, 0, sizeof(trx), &bytes_read, (uint8_t *)&trx);
+ if (err) {
+ pr_err("MTD reading error: %d\n", err);
+ kfree(parts);
+ return err;
+ }
+
+ if (trx.magic != TRX_MAGIC) {
+ kfree(parts);
+ return -ENOENT;
+ }
+
+ /* We have an LZMA loader if there is an address in offset[2] */
+ if (trx.offset[2]) {
+ part = &parts[curr_part++];
+ part->name = "loader";
+ part->offset = trx.offset[i];
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = "linux";
+ part->offset = trx.offset[i];
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = parser_trx_data_part_name(mtd, trx.offset[i]);
+ part->offset = trx.offset[i];
+ i++;
+ }
+
+ /*
+ * Assume that every partition ends at the beginning of the one
+ * that follows it.
+ */
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : mtd->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ }
+
+ *pparts = parts;
+ return i;
+}
+
+static struct mtd_part_parser mtd_parser_trx = {
+ .parse_fn = parser_trx_parse,
+ .name = "trx",
+};
+module_mtd_part_parser(mtd_parser_trx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Parser for TRX format partitions");
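The TRX_MAGIC value defined above is simply ASCII "HDR0" stored little-endian; a stand-alone check (illustration only, assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t magic = 0x30524448;	/* TRX_MAGIC */
	char s[5] = "";

	memcpy(s, &magic, 4);
	printf("%s\n", s);	/* prints HDR0 */
	return 0;
}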
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index bfdfb1e72b38..293c8a4d1e49 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -108,7 +108,7 @@ config SPI_INTEL_SPI_PLATFORM
config SPI_STM32_QUADSPI
tristate "STM32 Quad SPI controller"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || COMPILE_TEST
help
This enables support for the STM32 Quad SPI controller.
We only connect the NOR to this controller.
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 56051d30f000..0106357421bd 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -19,6 +19,7 @@
#include <linux/mtd/spi-nor.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/sizes.h>
#include <linux/sysfs.h>
#define DEVICE_NAME "aspeed-smc"
@@ -97,6 +98,7 @@ struct aspeed_smc_chip {
struct aspeed_smc_controller *controller;
void __iomem *ctl; /* control register */
void __iomem *ahb_base; /* base of chip window */
+ u32 ahb_window_size; /* chip mapping window size */
u32 ctl_val[smc_max]; /* control settings */
enum aspeed_smc_flash_type type; /* what type of flash */
struct spi_nor nor;
@@ -109,6 +111,7 @@ struct aspeed_smc_controller {
const struct aspeed_smc_info *info; /* type info of controller */
void __iomem *regs; /* controller registers */
void __iomem *ahb_base; /* per-chip windows resource */
+ u32 ahb_window_size; /* full mapping window size */
struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */
};
@@ -180,8 +183,7 @@ struct aspeed_smc_controller {
#define CONTROL_KEEP_MASK \
(CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \
- CONTROL_IO_DUMMY_MASK | CONTROL_CLOCK_FREQ_SEL_MASK | \
- CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
+ CONTROL_CLOCK_FREQ_SEL_MASK | CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
/*
* The Segment Register uses a 8MB unit to encode the start address
@@ -194,6 +196,10 @@ struct aspeed_smc_controller {
#define SEGMENT_ADDR_REG0 0x30
#define SEGMENT_ADDR_START(_r) ((((_r) >> 16) & 0xFF) << 23)
#define SEGMENT_ADDR_END(_r) ((((_r) >> 24) & 0xFF) << 23)
+#define SEGMENT_ADDR_VALUE(start, end) \
+ (((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24))
+#define SEGMENT_ADDR_REG(controller, cs) \
+ ((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4)
/*
* In user mode all data bytes read or written to the chip decode address
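A round-trip through the new SEGMENT_ADDR_VALUE() encoding (illustration only, host-side C): with the 8MB granularity of bit 23, a 32MB window at 0x20000000 encodes and decodes as follows.

#include <stdio.h>

#define SEGMENT_ADDR_START(r)	((((r) >> 16) & 0xFF) << 23)
#define SEGMENT_ADDR_END(r)	((((r) >> 24) & 0xFF) << 23)
#define SEGMENT_ADDR_VALUE(s, e) \
	(((((s) >> 23) & 0xFF) << 16) | ((((e) >> 23) & 0xFF) << 24))

int main(void)
{
	unsigned int reg = SEGMENT_ADDR_VALUE(0x20000000u, 0x22000000u);

	printf("reg=0x%08x start=0x%08x end=0x%08x\n", reg,
	       SEGMENT_ADDR_START(reg), SEGMENT_ADDR_END(reg));
	/* reg=0x44400000 start=0x20000000 end=0x22000000 */
	return 0;
}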
@@ -439,8 +445,7 @@ static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip,
u32 reg;
if (controller->info->nce > 1) {
- reg = readl(controller->regs + SEGMENT_ADDR_REG0 +
- chip->cs * 4);
+ reg = readl(SEGMENT_ADDR_REG(controller, chip->cs));
if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg))
return NULL;
@@ -451,6 +456,146 @@ static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip,
return controller->ahb_base + offset;
}
+static u32 aspeed_smc_ahb_base_phy(struct aspeed_smc_controller *controller)
+{
+ u32 seg0_val = readl(SEGMENT_ADDR_REG(controller, 0));
+
+ return SEGMENT_ADDR_START(seg0_val);
+}
+
+static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start,
+ u32 size)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ void __iomem *seg_reg;
+ u32 seg_oldval, seg_newval, ahb_base_phy, end;
+
+ ahb_base_phy = aspeed_smc_ahb_base_phy(controller);
+
+ seg_reg = SEGMENT_ADDR_REG(controller, cs);
+ seg_oldval = readl(seg_reg);
+
+ /*
+ * If the chip size is not specified, use the default segment
+ * size, but take into account the possible overlap with the
+ * previous segment
+ */
+ if (!size)
+ size = SEGMENT_ADDR_END(seg_oldval) - start;
+
+ /*
+ * The segment cannot exceed the maximum window size of the
+ * controller.
+ */
+ if (start + size > ahb_base_phy + controller->ahb_window_size) {
+ size = ahb_base_phy + controller->ahb_window_size - start;
+ dev_warn(chip->nor.dev, "CE%d window resized to %dMB",
+ cs, size >> 20);
+ }
+
+ end = start + size;
+ seg_newval = SEGMENT_ADDR_VALUE(start, end);
+ writel(seg_newval, seg_reg);
+
+ /*
+ * Restore default value if something goes wrong. The chip
+ * might have set some bogus value and we would lose access
+ * to the chip.
+ */
+ if (seg_newval != readl(seg_reg)) {
+ dev_err(chip->nor.dev, "CE%d window invalid", cs);
+ writel(seg_oldval, seg_reg);
+ start = SEGMENT_ADDR_START(seg_oldval);
+ end = SEGMENT_ADDR_END(seg_oldval);
+ size = end - start;
+ }
+
+ dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB",
+ cs, start, end, size >> 20);
+
+ return size;
+}
+
+/*
+ * The segment register defines the mapping window on the AHB bus and
+ * it needs to be configured depending on the chip size. The segment
+ * register of the following CE also needs to be tuned in order to
+ * provide a contiguous window across multiple chips.
+ *
+ * This is expected to be called in increasing CE order
+ */
+static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 ahb_base_phy, start;
+ u32 size = chip->nor.mtd.size;
+
+ /*
+ * Each controller has a chip size limit for direct memory
+ * access
+ */
+ if (size > controller->info->maxsize)
+ size = controller->info->maxsize;
+
+ /*
+ * The AST2400 SPI controller only handles one chip and does
+ * not have segment registers. Let's use the chip size for the
+ * AHB window.
+ */
+ if (controller->info == &spi_2400_info)
+ goto out;
+
+ /*
+ * The AST2500 SPI controller has a HW bug when the CE0 chip
+ * size reaches 128MB. Enforce a size limit of 120MB to
+ * prevent the controller from using bogus settings in the
+ * segment register.
+ */
+ if (chip->cs == 0 && controller->info == &spi_2500_info &&
+ size == SZ_128M) {
+ size = 120 << 20;
+ dev_info(chip->nor.dev,
+ "CE%d window resized to %dMB (AST2500 HW quirk)",
+ chip->cs, size >> 20);
+ }
+
+ ahb_base_phy = aspeed_smc_ahb_base_phy(controller);
+
+ /*
+ * As a start address for the current segment, use the default
+ * start address if we are handling CE0, or the previous
+ * segment's end address otherwise
+ */
+ if (chip->cs) {
+ u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1));
+
+ start = SEGMENT_ADDR_END(prev);
+ } else {
+ start = ahb_base_phy;
+ }
+
+ size = chip_set_segment(chip, chip->cs, start, size);
+
+ /* Update chip base address on the AHB bus */
+ chip->ahb_base = controller->ahb_base + (start - ahb_base_phy);
+
+ /*
+ * Now, make sure the next segment does not overlap with the
+ * current one we just configured, even if there is no
+ * available chip. That could break access in Command Mode.
+ */
+ if (chip->cs < controller->info->nce - 1)
+ chip_set_segment(chip, chip->cs + 1, start + size, 0);
+
+out:
+ if (size < chip->nor.mtd.size)
+ dev_warn(chip->nor.dev,
+ "CE%d window too small for chip %dMB",
+ chip->cs, (u32)chip->nor.mtd.size >> 20);
+
+ return size;
+}
+
static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip)
{
struct aspeed_smc_controller *controller = chip->controller;
@@ -524,7 +669,7 @@ static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip,
*/
chip->ahb_base = aspeed_smc_chip_base(chip, res);
if (!chip->ahb_base) {
- dev_warn(chip->nor.dev, "CE segment window closed.\n");
+ dev_warn(chip->nor.dev, "CE%d window closed", chip->cs);
return -EINVAL;
}
@@ -571,6 +716,9 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
if (chip->nor.addr_width == 4 && info->set_4b)
info->set_4b(chip);
+ /* This is for direct AHB access when using Command Mode. */
+ chip->ahb_window_size = aspeed_smc_chip_set_segment(chip);
+
/*
* base mode has not been optimized yet. use it for writes.
*/
@@ -585,14 +733,12 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
* TODO: Adjust clocks if fast read is supported and interpret
* SPI-NOR flags to adjust controller settings.
*/
- switch (chip->nor.flash_read) {
- case SPI_NOR_NORMAL:
- cmd = CONTROL_COMMAND_MODE_NORMAL;
- break;
- case SPI_NOR_FAST:
- cmd = CONTROL_COMMAND_MODE_FREAD;
- break;
- default:
+ if (chip->nor.read_proto == SNOR_PROTO_1_1_1) {
+ if (chip->nor.read_dummy == 0)
+ cmd = CONTROL_COMMAND_MODE_NORMAL;
+ else
+ cmd = CONTROL_COMMAND_MODE_FREAD;
+ } else {
dev_err(chip->nor.dev, "unsupported SPI read mode\n");
return -EINVAL;
}
@@ -608,6 +754,11 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
struct device_node *np, struct resource *r)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
const struct aspeed_smc_info *info = controller->info;
struct device *dev = controller->dev;
struct device_node *child;
@@ -671,11 +822,11 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
break;
/*
- * TODO: Add support for SPI_NOR_QUAD and SPI_NOR_DUAL
+ * TODO: Add support for Dual and Quad SPI protocols
* attach when board support is present as determined
* by of property.
*/
- ret = spi_nor_scan(nor, NULL, SPI_NOR_NORMAL);
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
break;
@@ -731,6 +882,8 @@ static int aspeed_smc_probe(struct platform_device *pdev)
if (IS_ERR(controller->ahb_base))
return PTR_ERR(controller->ahb_base);
+ controller->ahb_window_size = resource_size(res);
+
ret = aspeed_smc_setup_flash(controller, np, res);
if (ret)
dev_err(dev, "Aspeed SMC probe failed %d\n", ret);
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
index 47937d9beec6..ba76fa8f2031 100644
--- a/drivers/mtd/spi-nor/atmel-quadspi.c
+++ b/drivers/mtd/spi-nor/atmel-quadspi.c
@@ -275,14 +275,48 @@ static void atmel_qspi_debug_command(struct atmel_qspi *aq,
static int atmel_qspi_run_command(struct atmel_qspi *aq,
const struct atmel_qspi_command *cmd,
- u32 ifr_tfrtyp, u32 ifr_width)
+ u32 ifr_tfrtyp, enum spi_nor_protocol proto)
{
u32 iar, icr, ifr, sr;
int err = 0;
iar = 0;
icr = 0;
- ifr = ifr_tfrtyp | ifr_width;
+ ifr = ifr_tfrtyp;
+
+ /* Set the SPI protocol */
+ switch (proto) {
+ case SNOR_PROTO_1_1_1:
+ ifr |= QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
+ break;
+
+ case SNOR_PROTO_1_1_2:
+ ifr |= QSPI_IFR_WIDTH_DUAL_OUTPUT;
+ break;
+
+ case SNOR_PROTO_1_1_4:
+ ifr |= QSPI_IFR_WIDTH_QUAD_OUTPUT;
+ break;
+
+ case SNOR_PROTO_1_2_2:
+ ifr |= QSPI_IFR_WIDTH_DUAL_IO;
+ break;
+
+ case SNOR_PROTO_1_4_4:
+ ifr |= QSPI_IFR_WIDTH_QUAD_IO;
+ break;
+
+ case SNOR_PROTO_2_2_2:
+ ifr |= QSPI_IFR_WIDTH_DUAL_CMD;
+ break;
+
+ case SNOR_PROTO_4_4_4:
+ ifr |= QSPI_IFR_WIDTH_QUAD_CMD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
/* Compute instruction parameters */
if (cmd->enable.bits.instruction) {
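For reference, the SNOR_PROTO_x_y_z names encode how many I/O lines carry the command, address and data phases respectively, which is why each protocol maps onto exactly one QSPI_IFR_WIDTH_* value: 1_1_2 is Dual Output (only data on two lines), 1_2_2 is Dual I/O (address and data on two lines) and 2_2_2 is Dual Command (all three phases), with the quad variants analogous.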
@@ -434,7 +468,7 @@ static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode,
cmd.rx_buf = buf;
cmd.buf_len = len;
return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ,
- QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+ nor->reg_proto);
}
static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
@@ -450,7 +484,7 @@ static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
cmd.tx_buf = buf;
cmd.buf_len = len;
return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
- QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+ nor->reg_proto);
}
static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
@@ -469,7 +503,7 @@ static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
cmd.tx_buf = write_buf;
cmd.buf_len = len;
ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
- QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+ nor->write_proto);
return (ret < 0) ? ret : len;
}
@@ -484,7 +518,7 @@ static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
cmd.instruction = nor->erase_opcode;
cmd.address = (u32)offs;
return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
- QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+ nor->reg_proto);
}
static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -493,27 +527,8 @@ static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
struct atmel_qspi *aq = nor->priv;
struct atmel_qspi_command cmd;
u8 num_mode_cycles, num_dummy_cycles;
- u32 ifr_width;
ssize_t ret;
- switch (nor->flash_read) {
- case SPI_NOR_NORMAL:
- case SPI_NOR_FAST:
- ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
- break;
-
- case SPI_NOR_DUAL:
- ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT;
- break;
-
- case SPI_NOR_QUAD:
- ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT;
- break;
-
- default:
- return -EINVAL;
- }
-
if (nor->read_dummy >= 2) {
num_mode_cycles = 2;
num_dummy_cycles = nor->read_dummy - 2;
@@ -536,7 +551,7 @@ static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
cmd.rx_buf = read_buf;
cmd.buf_len = len;
ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
- ifr_width);
+ nor->read_proto);
return (ret < 0) ? ret : len;
}
@@ -590,6 +605,20 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
static int atmel_qspi_probe(struct platform_device *pdev)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_2_2 |
+ SNOR_HWCAPS_READ_2_2_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_READ_1_4_4 |
+ SNOR_HWCAPS_READ_4_4_4 |
+ SNOR_HWCAPS_PP |
+ SNOR_HWCAPS_PP_1_1_4 |
+ SNOR_HWCAPS_PP_1_4_4 |
+ SNOR_HWCAPS_PP_4_4_4,
+ };
struct device_node *child, *np = pdev->dev.of_node;
struct atmel_qspi *aq;
struct resource *res;
@@ -679,7 +708,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (err)
goto disable_clk;
- err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ err = spi_nor_scan(nor, NULL, &hwcaps);
if (err)
goto disable_clk;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 9f8102de1b16..53c7d8e0327a 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -855,15 +855,14 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
if (read) {
- switch (nor->flash_read) {
- case SPI_NOR_NORMAL:
- case SPI_NOR_FAST:
+ switch (nor->read_proto) {
+ case SNOR_PROTO_1_1_1:
f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
break;
- case SPI_NOR_DUAL:
+ case SNOR_PROTO_1_1_2:
f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
break;
- case SPI_NOR_QUAD:
+ case SNOR_PROTO_1_1_4:
f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
break;
default:
@@ -1069,6 +1068,13 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
struct cqspi_flash_pdata *f_pdata;
@@ -1123,7 +1129,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
}
- ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
goto err;
@@ -1277,7 +1283,7 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#define CQSPI_DEV_PM_OPS NULL
#endif
-static struct of_device_id const cqspi_dt_ids[] = {
+static const struct of_device_id cqspi_dt_ids[] = {
{.compatible = "cdns,qspi-nor",},
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 1476135e0d50..f17d22435bfc 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -957,6 +957,10 @@ static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
static int fsl_qspi_probe(struct platform_device *pdev)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct fsl_qspi *q;
@@ -1065,7 +1069,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
/* set the chip address for READID */
fsl_qspi_set_base_addr(q, nor);
- ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
goto mutex_failed;
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index a286350627a6..d1106832b9d5 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -120,19 +120,24 @@ static inline int wait_op_finish(struct hifmc_host *host)
(reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
}
-static int get_if_type(enum read_mode flash_read)
+static int get_if_type(enum spi_nor_protocol proto)
{
enum hifmc_iftype if_type;
- switch (flash_read) {
- case SPI_NOR_DUAL:
+ switch (proto) {
+ case SNOR_PROTO_1_1_2:
if_type = IF_TYPE_DUAL;
break;
- case SPI_NOR_QUAD:
+ case SNOR_PROTO_1_2_2:
+ if_type = IF_TYPE_DIO;
+ break;
+ case SNOR_PROTO_1_1_4:
if_type = IF_TYPE_QUAD;
break;
- case SPI_NOR_NORMAL:
- case SPI_NOR_FAST:
+ case SNOR_PROTO_1_4_4:
+ if_type = IF_TYPE_QIO;
+ break;
+ case SNOR_PROTO_1_1_1:
default:
if_type = IF_TYPE_STD;
break;
@@ -253,7 +258,10 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
reg = OP_CFG_FM_CS(priv->chipselect);
- if_type = get_if_type(nor->flash_read);
+ if (op_type == FMC_OP_READ)
+ if_type = get_if_type(nor->read_proto);
+ else
+ if_type = get_if_type(nor->write_proto);
reg |= OP_CFG_MEM_IF_TYPE(if_type);
if (op_type == FMC_OP_READ)
reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
@@ -321,6 +329,13 @@ static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
static int hisi_spi_nor_register(struct device_node *np,
struct hifmc_host *host)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
struct device *dev = host->dev;
struct spi_nor *nor;
struct hifmc_priv *priv;
@@ -362,7 +377,7 @@ static int hisi_spi_nor_register(struct device_node *np,
nor->read = hisi_spi_nor_read;
nor->write = hisi_spi_nor_write;
nor->erase = NULL;
- ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
return ret;
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 986a3d020a3a..8a596bfeddff 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -715,6 +715,11 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
struct intel_spi *intel_spi_probe(struct device *dev,
struct resource *mem, const struct intel_spi_boardinfo *info)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
struct mtd_partition part;
struct intel_spi *ispi;
int ret;
@@ -746,7 +751,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
ispi->nor.write = intel_spi_write;
ispi->nor.erase = intel_spi_erase;
- ret = spi_nor_scan(&ispi->nor, NULL, SPI_NOR_NORMAL);
+ ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
if (ret) {
dev_info(dev, "failed to locate the chip\n");
return ERR_PTR(ret);
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index b6377707ce32..8a20ec4991c8 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -123,20 +123,20 @@ static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
{
struct spi_nor *nor = &mt8173_nor->nor;
- switch (nor->flash_read) {
- case SPI_NOR_FAST:
+ switch (nor->read_proto) {
+ case SNOR_PROTO_1_1_1:
writeb(nor->read_opcode, mt8173_nor->base +
MTK_NOR_PRGDATA3_REG);
writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
MTK_NOR_CFG1_REG);
break;
- case SPI_NOR_DUAL:
+ case SNOR_PROTO_1_1_2:
writeb(nor->read_opcode, mt8173_nor->base +
MTK_NOR_PRGDATA3_REG);
writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
MTK_NOR_DUAL_REG);
break;
- case SPI_NOR_QUAD:
+ case SNOR_PROTO_1_1_4:
writeb(nor->read_opcode, mt8173_nor->base +
MTK_NOR_PRGDATA4_REG);
writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
@@ -408,6 +408,11 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct device_node *flash_node)
{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_PP,
+ };
int ret;
struct spi_nor *nor;
@@ -426,7 +431,7 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
nor->write_reg = mt8173_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
- ret = spi_nor_scan(nor, NULL, SPI_NOR_DUAL);
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
return ret;
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index 73a14f40928b..15374216d4d9 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -240,13 +240,12 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
{
- switch (spifi->nor.flash_read) {
- case SPI_NOR_NORMAL:
- case SPI_NOR_FAST:
+ switch (spifi->nor.read_proto) {
+ case SNOR_PROTO_1_1_1:
spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
break;
- case SPI_NOR_DUAL:
- case SPI_NOR_QUAD:
+ case SNOR_PROTO_1_1_2:
+ case SNOR_PROTO_1_1_4:
spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
break;
default:
@@ -274,7 +273,11 @@ static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
struct device_node *np)
{
- enum read_mode flash_read;
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
u32 ctrl, property;
u16 mode = 0;
int ret;
@@ -308,13 +311,12 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
if (mode & SPI_RX_DUAL) {
ctrl |= SPIFI_CTRL_DUAL;
- flash_read = SPI_NOR_DUAL;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
} else if (mode & SPI_RX_QUAD) {
ctrl &= ~SPIFI_CTRL_DUAL;
- flash_read = SPI_NOR_QUAD;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
} else {
ctrl |= SPIFI_CTRL_DUAL;
- flash_read = SPI_NOR_NORMAL;
}
switch (mode & (SPI_CPHA | SPI_CPOL)) {
@@ -351,7 +353,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
*/
nxp_spifi_dummy_id_read(&spifi->nor);
- ret = spi_nor_scan(&spifi->nor, NULL, flash_read);
+ ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps);
if (ret) {
dev_err(spifi->dev, "device scan failed\n");
return ret;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index dea8c9cbadf0..1413828ff1fb 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -150,24 +150,6 @@ static int read_cr(struct spi_nor *nor)
}
/*
- * Dummy Cycle calculation for different type of read.
- * It can be used to support more commands with
- * different dummy cycle requirements.
- */
-static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
-{
- switch (nor->flash_read) {
- case SPI_NOR_FAST:
- case SPI_NOR_DUAL:
- case SPI_NOR_QUAD:
- return 8;
- case SPI_NOR_NORMAL:
- return 0;
- }
- return 0;
-}
-
-/*
* Write status register 1 byte
* Returns negative if error occurred.
*/
@@ -221,6 +203,10 @@ static inline u8 spi_nor_convert_3to4_read(u8 opcode)
{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
};
return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
@@ -1022,10 +1008,12 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron */
@@ -1036,7 +1024,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
@@ -1076,6 +1064,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -1159,7 +1148,9 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -1403,8 +1394,9 @@ static int macronix_quad_enable(struct spi_nor *nor)
write_sr(nor, val | SR_QUAD_EN_MX);
- if (spi_nor_wait_till_ready(nor))
- return 1;
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
ret = read_sr(nor);
if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
@@ -1460,30 +1452,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
return 0;
}
-static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
-{
- int status;
-
- switch (JEDEC_MFR(info)) {
- case SNOR_MFR_MACRONIX:
- status = macronix_quad_enable(nor);
- if (status) {
- dev_err(nor->dev, "Macronix quad-read not enabled\n");
- return -EINVAL;
- }
- return status;
- case SNOR_MFR_MICRON:
- return 0;
- default:
- status = spansion_quad_enable(nor);
- if (status) {
- dev_err(nor->dev, "Spansion quad-read not enabled\n");
- return -EINVAL;
- }
- return status;
- }
-}
-
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev || !nor->read || !nor->write ||
@@ -1536,8 +1504,349 @@ static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
return 0;
}
-int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octo SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octo SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ int (*quad_enable)(struct spi_nor *nor);
+};
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+static void
+spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+static int spi_nor_init_params(struct spi_nor *nor,
+ const struct flash_info *info,
+ struct spi_nor_flash_parameter *params)
+{
+ /* Set legacy flash parameters as default. */
+ memset(params, 0, sizeof(*params));
+
+ /* Set SPI NOR sizes. */
+ params->size = info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ }
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ /* Select the procedure to set the Quad Enable bit. */
+ if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
+ SNOR_HWCAPS_PP_QUAD)) {
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_MACRONIX:
+ params->quad_enable = macronix_quad_enable;
+ break;
+
+ case SNOR_MFR_MICRON:
+ break;
+
+ default:
+ params->quad_enable = spansion_quad_enable;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+ * In the spi-nor framework, we don't need to distinguish between
+ * mode clock cycles and wait state clock cycles.
+ * Indeed, the value of the mode clock cycles is used by a QSPI
+ * flash memory to know whether it should enter or leave its 0-4-4
+ * (Continuous Read / XIP) mode.
+ * eXecution In Place is out of the scope of the mtd sub-system.
+ * Hence we choose to merge both mode and wait state clock cycles
+ * into the so-called dummy clock cycles.
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (info->flags & SECT_4K) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else
+#endif
+ {
+ nor->erase_opcode = SPINOR_OP_SE;
+ mtd->erasesize = info->sector_size;
+ }
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ u32 ignored_mask, shared_mask;
+ bool enable_quad_io;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ /* SPI n-n-n protocols are not supported yet. */
+ ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
+ SNOR_HWCAPS_READ_4_4_4 |
+ SNOR_HWCAPS_READ_8_8_8 |
+ SNOR_HWCAPS_PP_4_4_4 |
+ SNOR_HWCAPS_PP_8_8_8);
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported yet.\n");
+ shared_mask &= ~ignored_mask;
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, params, shared_mask);
+ if (err) {
+ dev_err(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, params, shared_mask);
+ if (err) {
+ dev_err(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor, info);
+ if (err) {
+ dev_err(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Enable Quad I/O if needed. */
+ enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4);
+ if (enable_quad_io && params->quad_enable) {
+ err = params->quad_enable(nor);
+ if (err) {
+ dev_err(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ struct spi_nor_flash_parameter params;
const struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
@@ -1549,6 +1858,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (ret)
return ret;
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
if (name)
info = spi_nor_match_id(name);
/* Try to auto-detect if chip name wasn't specified or not found */
@@ -1591,6 +1905,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (info->flags & SPI_S3AN)
nor->flags |= SNOR_F_READY_XSR_RDY;
+ /* Parse the Serial Flash Discoverable Parameters table. */
+ ret = spi_nor_init_params(nor, info, &params);
+ if (ret)
+ return ret;
+
/*
* Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
* with the software protection bits set
@@ -1611,7 +1930,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->type = MTD_NORFLASH;
mtd->writesize = 1;
mtd->flags = MTD_CAP_NORFLASH;
- mtd->size = info->sector_size * info->n_sectors;
+ mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
@@ -1642,75 +1961,38 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (info->flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- /* prefer "small sector" erase if possible */
- if (info->flags & SECT_4K) {
- nor->erase_opcode = SPINOR_OP_BE_4K;
- mtd->erasesize = 4096;
- } else if (info->flags & SECT_4K_PMC) {
- nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
- mtd->erasesize = 4096;
- } else
-#endif
- {
- nor->erase_opcode = SPINOR_OP_SE;
- mtd->erasesize = info->sector_size;
- }
-
if (info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
mtd->dev.parent = dev;
- nor->page_size = info->page_size;
+ nor->page_size = params.page_size;
mtd->writebufsize = nor->page_size;
if (np) {
/* If we were instantiated by DT, use it */
if (of_property_read_bool(np, "m25p,fast-read"))
- nor->flash_read = SPI_NOR_FAST;
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
else
- nor->flash_read = SPI_NOR_NORMAL;
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
} else {
/* If we weren't instantiated by DT, default to fast-read */
- nor->flash_read = SPI_NOR_FAST;
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
}
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & SPI_NOR_NO_FR)
- nor->flash_read = SPI_NOR_NORMAL;
-
- /* Quad/Dual-read mode takes precedence over fast/normal */
- if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
- ret = set_quad_mode(nor, info);
- if (ret) {
- dev_err(dev, "quad mode not supported\n");
- return ret;
- }
- nor->flash_read = SPI_NOR_QUAD;
- } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
- nor->flash_read = SPI_NOR_DUAL;
- }
-
- /* Default commands */
- switch (nor->flash_read) {
- case SPI_NOR_QUAD:
- nor->read_opcode = SPINOR_OP_READ_1_1_4;
- break;
- case SPI_NOR_DUAL:
- nor->read_opcode = SPINOR_OP_READ_1_1_2;
- break;
- case SPI_NOR_FAST:
- nor->read_opcode = SPINOR_OP_READ_FAST;
- break;
- case SPI_NOR_NORMAL:
- nor->read_opcode = SPINOR_OP_READ;
- break;
- default:
- dev_err(dev, "No Read opcode defined\n");
- return -EINVAL;
- }
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- nor->program_opcode = SPINOR_OP_PP;
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
+ */
+ ret = spi_nor_setup(nor, info, &params, hwcaps);
+ if (ret)
+ return ret;
if (info->addr_width)
nor->addr_width = info->addr_width;
@@ -1732,8 +2014,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
return -EINVAL;
}
- nor->read_dummy = spi_nor_read_dummy_cycles(nor);
-
if (info->flags & SPI_S3AN) {
ret = s3an_nor_scan(info, nor);
if (ret)
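
The selection helpers above work because the SNOR_HWCAPS_* read bits are ordered by performance, so the highest set bit in the shared mask is always the fastest protocol both sides support. A worked sketch of what spi_nor_setup() ends up computing (variable names are illustrative; the flag values come from this patch):

    /* e.g. the atmel controller above scanning an mx66l51235l
     * (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ in spi_nor_ids[]):
     */
    u32 shared = ctrl_hwcaps & flash_hwcaps;
    int best = fls(shared & SNOR_HWCAPS_READ_MASK) - 1; /* -1 when empty */

    /* The highest shared bit here is SNOR_HWCAPS_READ_1_1_4, so
     * spi_nor_hwcaps_read2cmd(BIT(best)) yields SNOR_CMD_READ_1_1_4 and
     * spi_nor_select_read() programs SPINOR_OP_READ_1_1_4 with
     * 0 mode clocks + 8 wait states = 8 dummy cycles.
     */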
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index ae45f81b8cd3..86c0931543c5 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -19,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/sizes.h>
#define QUADSPI_CR 0x00
#define CR_EN BIT(0)
@@ -192,15 +193,15 @@ static void stm32_qspi_set_framemode(struct spi_nor *nor,
cmd->framemode = CCR_IMODE_1;
if (read) {
- switch (nor->flash_read) {
- case SPI_NOR_NORMAL:
- case SPI_NOR_FAST:
+ switch (nor->read_proto) {
+ default:
+ case SNOR_PROTO_1_1_1:
dmode = CCR_DMODE_1;
break;
- case SPI_NOR_DUAL:
+ case SNOR_PROTO_1_1_2:
dmode = CCR_DMODE_2;
break;
- case SPI_NOR_QUAD:
+ case SNOR_PROTO_1_1_4:
dmode = CCR_DMODE_4;
break;
}
@@ -375,7 +376,7 @@ static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
struct stm32_qspi_cmd cmd;
int err;
- dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#x\n",
+ dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#zx\n",
nor->read_opcode, buf, (u32)from, len);
memset(&cmd, 0, sizeof(cmd));
@@ -402,7 +403,7 @@ static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
struct stm32_qspi_cmd cmd;
int err;
- dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#x\n",
+ dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n",
nor->program_opcode, buf, (u32)to, len);
memset(&cmd, 0, sizeof(cmd));
@@ -480,7 +481,12 @@ static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
struct device_node *np)
{
- u32 width, flash_read, presc, cs_num, max_rate = 0;
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ u32 width, presc, cs_num, max_rate = 0;
struct stm32_qspi_flash *flash;
struct mtd_info *mtd;
int ret;
@@ -499,12 +505,10 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
width = 1;
if (width == 4)
- flash_read = SPI_NOR_QUAD;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
else if (width == 2)
- flash_read = SPI_NOR_DUAL;
- else if (width == 1)
- flash_read = SPI_NOR_NORMAL;
- else
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ else if (width != 1)
return -EINVAL;
flash = &qspi->flash[cs_num];
@@ -539,7 +543,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
*/
flash->fsize = FSIZE_VAL(SZ_1K);
- ret = spi_nor_scan(&flash->nor, NULL, flash_read);
+ ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
if (ret) {
dev_err(qspi->dev, "device scan failed\n");
return ret;
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index aecc6ce5a9e1..fa2519ad2435 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -102,7 +102,7 @@ static int write_eraseblock2(int ebnum)
if (unlikely(err || written != subpgsize * k)) {
pr_err("error: write failed at %#llx\n",
(long long)addr);
- if (written != subpgsize) {
+ if (written != subpgsize * k) {
pr_err(" write size: %#x\n",
subpgsize * k);
pr_err(" written: %#08zx\n",
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 14ff622190a5..181839d6fbea 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4596,7 +4596,7 @@ static int bond_check_params(struct bond_params *params)
}
ad_user_port_key = valptr->value;
- if (bond_mode == BOND_MODE_TLB) {
+ if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
bond_opt_initstr(&newval, "default");
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
&newval);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e68d368e20ac..7f36d3e3c98b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1665,6 +1665,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53125",
.vlans = 4096,
.enabled_ports = 0xff,
+ .arl_entries = 4,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 947ea352a57a..7fa19d4a8e13 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3233,6 +3233,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d3906f6b01bd..86058a9f3417 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,16 +1785,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_enet_gpiod_get(pdata);
- pdata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->clk)) {
- /* Abort if the clock is defined but couldn't be retrived.
- * Always abort if the clock is missing on DT system as
- * the driver can't cope with this case.
- */
- if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
- return PTR_ERR(pdata->clk);
- /* Firmware may have set up the clock already. */
- dev_info(dev, "clocks have been setup already\n");
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
+ pdata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ /* Abort if the clock is defined but couldn't be
+ * retrieved. Always abort if the clock is missing on
+ * a DT system as the driver can't cope with this case.
+ */
+ if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
+ return PTR_ERR(pdata->clk);
+ /* Firmware may have set up the clock already. */
+ dev_info(dev, "clocks have been setup already\n");
+ }
}
if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 73aca97a96bc..d937083db9a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -50,11 +50,14 @@ static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
{
- return writel(value, bgmac->plat.idm_base + offset);
+ writel(value, bgmac->plat.idm_base + offset);
}
static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
{
+ if (!bgmac->plat.idm_base)
+ return true;
+
if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
return false;
if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
@@ -66,6 +69,9 @@ static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
{
u32 val;
+ if (!bgmac->plat.idm_base)
+ return;
+
/* The Reset Control register only contains a single bit to show if the
* controller is currently in reset. Do a sanity check here, just in
* case the bootloader happened to leave the device in reset.
@@ -180,6 +186,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+ bgmac->feature_flags |= BGMAC_FEAT_IDM_MASK;
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
@@ -207,15 +214,13 @@ static int bgmac_probe(struct platform_device *pdev)
return PTR_ERR(bgmac->plat.base);
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
- if (!regs) {
- dev_err(&pdev->dev, "Unable to obtain idm resource\n");
- return -EINVAL;
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+ bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
}
- bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
-
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
if (regs) {
bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ba4d2e145bb9..48d672b204a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -622,9 +622,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
- if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
- dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
- return -ENOTSUPP;
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
+ return -ENOTSUPP;
+ }
}
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -855,9 +857,11 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
static void bgmac_miiconfig(struct bgmac *bgmac)
{
if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
- bgmac_idm_write(bgmac, BCMA_IOCTL,
- bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
- BGMAC_BCMA_IOCTL_SW_CLKEN);
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) |
+ 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
+ }
bgmac->mac_speed = SPEED_2500;
bgmac->mac_duplex = DUPLEX_FULL;
bgmac_mac_speed(bgmac);
@@ -874,11 +878,36 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
}
}
+static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
+{
+ u32 iost;
+
+ iost = bgmac_idm_read(bgmac, BCMA_IOST);
+ if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
+ iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+ /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
+ if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
+ u32 flags = 0;
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+ bgmac_clk_enable(bgmac, flags);
+ }
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
+}
+
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
u32 cmdcfg_sr;
- u32 iost;
int i;
if (bgmac_clk_enabled(bgmac)) {
@@ -899,20 +928,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
/* TODO: Clear software multicast filter list */
}
- iost = bgmac_idm_read(bgmac, BCMA_IOST);
- if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
- iost &= ~BGMAC_BCMA_IOST_ATTACHED;
-
- /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
- if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
- u32 flags = 0;
- if (iost & BGMAC_BCMA_IOST_ATTACHED) {
- flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
- flags |= BGMAC_BCMA_IOCTL_SW_RESET;
- }
- bgmac_clk_enable(bgmac, flags);
- }
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
+ bgmac_chip_reset_idm_config(bgmac);
/* Request Misc PLL for corerev > 2 */
if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
@@ -970,11 +987,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
- bgmac_idm_write(bgmac, BCMA_IOCTL,
- bgmac_idm_read(bgmac, BCMA_IOCTL) &
- ~BGMAC_BCMA_IOCTL_SW_RESET);
-
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
* Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
* BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
@@ -1497,8 +1509,10 @@ int bgmac_enet_probe(struct bgmac *bgmac)
bgmac_clk_enable(bgmac, 0);
/* This seems to be fixing IRQ by assigning OOB #6 to the core */
- if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
- bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+ bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+ }
bgmac_chip_reset(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index c1818766c501..443d57b10264 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -425,6 +425,7 @@
#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17)
#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
+#define BGMAC_FEAT_IDM_MASK BIT(20)
struct bgmac_slot_info {
union {
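
BGMAC_FEAT_IDM_MASK inverts the usual feature-flag sense: bgmac_probe() sets it by default and clears it only when an "idm_base" resource is actually present, so a set flag means "no IDM register window; trust the firmware's setup". Every IDM access in bgmac.c is then wrapped in the same guard; a condensed sketch of the pattern (flags here is a placeholder value):

    if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
            /* idm_base is mapped, so the IDM registers are reachable */
            bgmac_idm_write(bgmac, BCMA_IOCTL,
                            bgmac_idm_read(bgmac, BCMA_IOCTL) | flags);
    }
    /* else: skip the access and assume the bootloader configured the core */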
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 43423744fdfa..1e33abde4a3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2886,7 +2886,7 @@ static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
static int bnx2x_test_nvram(struct bnx2x *bp)
{
- const struct crc_pair nvram_tbl[] = {
+ static const struct crc_pair nvram_tbl[] = {
{ 0, 0x14 }, /* bootstrap */
{ 0x14, 0xec }, /* dir */
{ 0x100, 0x350 }, /* manuf_info */
@@ -2895,7 +2895,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- const struct crc_pair nvram_tbl2[] = {
+ static const struct crc_pair nvram_tbl2[] = {
{ 0x7e8, 0x350 }, /* manuf_info2 */
{ 0xb38, 0xf0 }, /* feature_info */
{ 0, 0 }
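
Adding static here is more than style: a const array with automatic storage duration may be rebuilt on the stack every time bnx2x_test_nvram() runs, while a static const one is emitted once into read-only data. A generic illustration (not driver code):

    int f(void)
    {
            const int on_stack[] = { 1, 2, 3 };          /* may be re-initialized per call */
            static const int in_rodata[] = { 1, 2, 3 };  /* materialized once at build time */

            return on_stack[0] + in_rodata[0];
    }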
@@ -3162,7 +3162,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
- sprintf(queue_name, "%d", i);
+ snprintf(queue_name, sizeof(queue_name),
+ "%d", i);
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
snprintf(buf + (k + j)*ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index daca1c9d254b..7b0b399aaedd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-/* Simple helper to free a control block's resources */
-static void bcmgenet_free_cb(struct enet_cb *cb)
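+/* Rewind the ring's local write pointer, undoing a previous bcmgenet_get_txcb() */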
+static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
{
- dev_kfree_skb_any(cb->skb);
- cb->skb = NULL;
- dma_unmap_addr_set(cb, dma_addr, 0);
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+ /* Rewinding local write pointer */
+ if (ring->write_ptr == ring->cb_ptr)
+ ring->write_ptr = ring->end_ptr;
+ else
+ ring->write_ptr--;
+
+ return tx_cb_ptr;
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
INTRL2_CPU_MASK_SET);
}
+/* Simple helper to free a transmit control block's resources
+ * Returns an skb when the last transmit control block associated with the
+ * skb is freed. The skb should be freed by the caller if necessary.
+ */
+static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
+ struct enet_cb *cb)
+{
+ struct sk_buff *skb;
+
+ skb = cb->skb;
+
+ if (skb) {
+ cb->skb = NULL;
+ if (cb == GENET_CB(skb)->first_cb)
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+
+ if (cb == GENET_CB(skb)->last_cb)
+ return skb;
+
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_page(dev,
+ dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ return NULL;
+}
+
+/* Simple helper to free a receive control block's resources */
+static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
+ struct enet_cb *cb)
+{
+ struct sk_buff *skb;
+
+ skb = cb->skb;
+ cb->skb = NULL;
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ return skb;
+}
+
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int pkts_compl = 0;
+ unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
- unsigned int c_index;
+ unsigned int pkts_compl = 0;
unsigned int txbds_ready;
- unsigned int txbds_processed = 0;
+ unsigned int c_index;
+ struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
if (ring->index == DESC_INDEX)
@@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (txbds_processed < txbds_ready) {
- tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
- if (tx_cb_ptr->skb) {
+ skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
+ &priv->tx_cbs[ring->clean_ptr]);
+ if (skb) {
pkts_compl++;
- bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
- dma_unmap_single(kdev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
- bcmgenet_free_cb(tx_cb_ptr);
- } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dma_unmap_page(kdev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ bytes_compl += GENET_CB(skb)->bytes_sent;
+ dev_kfree_skb_any(skb);
}
txbds_processed++;
@@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
-/* Transmits a single SKB (either head of a fragment or a single SKB)
- * caller must hold priv->lock
- */
-static int bcmgenet_xmit_single(struct net_device *dev,
- struct sk_buff *skb,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int skb_len;
- dma_addr_t mapping;
- u32 length_status;
- int ret;
-
- tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
- if (unlikely(!tx_cb_ptr))
- BUG();
-
- tx_cb_ptr->skb = skb;
-
- skb_len = skb_headlen(skb);
-
- mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
- priv->mib.tx_dma_failed++;
- netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
- dev_kfree_skb(skb);
- return ret;
- }
-
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
- length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
- DMA_TX_APPEND_CRC;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- length_status |= DMA_TX_DO_CSUM;
-
- dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
-
- return 0;
-}
-
-/* Transmit a SKB fragment */
-static int bcmgenet_xmit_frag(struct net_device *dev,
- skb_frag_t *frag,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int frag_size;
- dma_addr_t mapping;
- int ret;
-
- tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
- if (unlikely(!tx_cb_ptr))
- BUG();
-
- tx_cb_ptr->skb = NULL;
-
- frag_size = skb_frag_size(frag);
-
- mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
- priv->mib.tx_dma_failed++;
- netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
- __func__);
- return ret;
- }
-
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
-
- dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
- return 0;
-}
-
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
@@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
struct bcmgenet_tx_ring *ring = NULL;
+ struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned long flags = 0;
int nr_frags, index;
- u16 dma_desc_flags;
+ dma_addr_t mapping;
+ unsigned int size;
+ skb_frag_t *frag;
+ u32 len_stat;
int ret;
int i;
@@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dma_desc_flags = DMA_SOP;
- if (nr_frags == 0)
- dma_desc_flags |= DMA_EOP;
+ for (i = 0; i <= nr_frags; i++) {
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
- /* Transmit single SKB or head of fragment list */
- ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
- if (ret) {
- ret = NETDEV_TX_OK;
- goto out;
- }
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+
+ if (!i) {
+ /* Transmit single SKB or head of fragment list */
+ GENET_CB(skb)->first_cb = tx_cb_ptr;
+ size = skb_headlen(skb);
+ mapping = dma_map_single(kdev, skb->data, size,
+ DMA_TO_DEVICE);
+ } else {
+ /* xmit fragment */
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(kdev, frag, 0, size,
+ DMA_TO_DEVICE);
+ }
- /* xmit fragment */
- for (i = 0; i < nr_frags; i++) {
- ret = bcmgenet_xmit_frag(dev,
- &skb_shinfo(skb)->frags[i],
- (i == nr_frags - 1) ? DMA_EOP : 0,
- ring);
+ ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
ret = NETDEV_TX_OK;
- goto out;
+ goto out_unmap_frags;
+ }
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, size);
+
+ tx_cb_ptr->skb = skb;
+
+ len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
+ if (!i) {
+ len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_stat |= DMA_TX_DO_CSUM;
}
+ if (i == nr_frags)
+ len_stat |= DMA_EOP;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
}
+ GENET_CB(skb)->last_cb = tx_cb_ptr;
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -1635,6 +1629,19 @@ out:
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
+
+out_unmap_frags:
+ /* Back up for failed control block mapping */
+ bcmgenet_put_txcb(priv, ring);
+
+ /* Unmap successfully mapped control blocks */
+ while (i-- > 0) {
+ tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
+ bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
+ }
+
+ dev_kfree_skb(skb);
+ goto out;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
@@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
}
/* Grab the current Rx skb from the ring and DMA-unmap it */
- rx_skb = cb->skb;
- if (likely(rx_skb))
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
+ rx_skb = bcmgenet_free_rx_cb(kdev, cb);
/* Put the new Rx skb on the ring */
cb->skb = skb;
dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
dmadesc_set_addr(priv, cb->bd_addr, mapping);
/* Return the current Rx skb to caller */
@@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
- struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
struct enet_cb *cb;
int i;
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
- if (dma_unmap_addr(cb, dma_addr)) {
- dma_unmap_single(kdev,
- dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
- dma_unmap_addr_set(cb, dma_addr, 0);
- }
-
- if (cb->skb)
- bcmgenet_free_cb(cb);
+ skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
+ if (skb)
+ dev_kfree_skb_any(skb);
}
}
@@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
- int i;
struct netdev_queue *txq;
+ struct sk_buff *skb;
+ struct enet_cb *cb;
+ int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
@@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
- if (priv->tx_cbs[i].skb != NULL) {
- dev_kfree_skb(priv->tx_cbs[i].skb);
- priv->tx_cbs[i].skb = NULL;
- }
+ cb = priv->tx_cbs + i;
+ skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
+ if (skb)
+ dev_kfree_skb(skb);
}
for (i = 0; i < priv->hw_params->tx_queues; i++) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index efd07020b89f..b9344de669f8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
};
struct bcmgenet_skb_cb {
+ struct enet_cb *first_cb; /* First control block of SKB */
+ struct enet_cb *last_cb; /* Last control block of SKB */
unsigned int bytes_sent; /* bytes on the wire (no TSB) */
};
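
Recording first_cb and last_cb per skb is what lets the shared bcmgenet_free_tx_cb() pick the right unmap primitive and release the skb exactly once. The resulting layout for an skb with two page fragments, sketched as a comment (indices illustrative):

    /*
     *   cb[n]   == GENET_CB(skb)->first_cb -> dma_unmap_single() (linear head)
     *   cb[n+1]                            -> dma_unmap_page()   (fragment 0)
     *   cb[n+2] == GENET_CB(skb)->last_cb  -> dma_unmap_page()   (fragment 1),
     *              and only here does bcmgenet_free_tx_cb() return the skb,
     *              so the caller frees it exactly once.
     */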
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 976a50f67551..09e287597c74 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -336,7 +336,7 @@ lio_ethtool_get_channels(struct net_device *dev,
static int lio_get_eeprom_len(struct net_device *netdev)
{
- u8 buf[128];
+ u8 buf[192];
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_board_info *board_info;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a0ca68ce3fbb..79112563a25a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1008,7 +1008,7 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
struct device *dev = &bgx->pdev->dev;
struct lmac *lmac;
- char str[20];
+ char str[27];
if (!bgx->is_dlm && lmacid)
return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 50517cfd9671..9f9d6cae39d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -441,7 +441,8 @@ void cxgb4_ptp_init(struct adapter *adapter)
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
&adapter->pdev->dev);
- if (!adapter->ptp_clock) {
+ if (IS_ERR_OR_NULL(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
dev_err(adapter->pdev_dev,
"PTP %s Clock registration has failed\n", __func__);
return;
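
The switch to IS_ERR_OR_NULL() matters because ptp_clock_register() has a three-way contract: a valid clock on success, an ERR_PTR() on registration failure, and NULL when CONFIG_PTP_1588_CLOCK is not built in (the inline stub returns NULL). A sketch of a caller handling all three cases — names are illustrative:

    clock = ptp_clock_register(&info, dev);
    if (IS_ERR(clock))
            return PTR_ERR(clock);  /* real registration failure */
    if (!clock)
            return 0;               /* PTP support compiled out: carry on without it */
    /* valid handle: ptp_clock_index(clock) etc. are now safe to use */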
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 99987d8e437e..aa28299aef5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -174,6 +174,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ff864a187d5a..a37166ee577b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -776,8 +776,9 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
assert(handle);
mac_cb = hns_get_mac_cb(handle);
- if (!mac_cb->cpld_ctrl)
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
return;
+
hns_set_led_opt(mac_cb);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 7a8addda726e..408b63faf9a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -53,6 +53,34 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
return ret;
}
+static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
+ u32 link, u32 port, u32 act)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = link;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = act;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
+ link, port, act);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
{
@@ -93,6 +121,18 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
}
}
+static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_set mac_cb is null!\n");
+ return;
+ }
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ link_status, mac_cb->mac_id, data);
+}
+
static void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
if (!mac_cb || !mac_cb->cpld_ctrl)
@@ -103,6 +143,20 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb)
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
+static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_reset mac_cb is null!\n");
+ return;
+ }
+
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ 0, mac_cb->mac_id, 0);
+}
+
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
@@ -604,8 +658,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
- misc_op->cpld_set_led = hns_cpld_set_led;
- misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led = hns_cpld_set_led_acpi;
+ misc_op->cpld_reset_led = cpld_led_reset_acpi;
misc_op->cpld_set_led_id = cpld_set_led_id;
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 249a4584401a..b651c1210555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -283,7 +283,7 @@ int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
}
/* Should be called under a lock */
-static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
+static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
struct mlx4_zone_allocator *zone_alloc = entry->allocator;
@@ -315,8 +315,6 @@ static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
}
zone_alloc->mask = mask;
}
-
- return 0;
}
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
@@ -457,7 +455,7 @@ struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
struct mlx4_zone_entry *zone;
- int res;
+ int res = 0;
spin_lock(&zones->lock);
@@ -468,7 +466,7 @@ int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
goto out;
}
- res = __mlx4_zone_remove_one_entry(zone);
+ __mlx4_zone_remove_one_entry(zone);
out:
spin_unlock(&zones->lock);
@@ -578,7 +576,7 @@ out:
}
static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
dma_addr_t t;
@@ -587,7 +585,7 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
dma_zalloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
+ size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -607,10 +605,10 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
* multiple pages, so we don't require too much contiguous memory.
*/
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
if (size <= max_direct) {
- return mlx4_buf_direct_alloc(dev, size, buf, gfp);
+ return mlx4_buf_direct_alloc(dev, size, buf);
} else {
dma_addr_t t;
int i;
@@ -620,14 +618,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- gfp);
+ GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_zalloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE, &t, gfp);
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
@@ -663,12 +661,11 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
- gfp_t gfp)
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
struct mlx4_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof *pgdir, gfp);
+ pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
@@ -676,7 +673,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, gfp);
+ &pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -716,7 +713,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
@@ -728,7 +725,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
- pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
+ pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -780,13 +777,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
{
int err;
- err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
if (err)
return err;
*wqres->db.db = 0;
- err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
if (err)
goto err_db;
@@ -795,7 +792,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
if (err)
goto err_mtt;
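
The mlx4 hunks above and below drop the gfp_t plumbing because every remaining caller passes GFP_KERNEL, i.e. all of these allocation paths may sleep. A minimal sketch of the resulting shape, with illustrative names only:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Sketch: with no atomic callers left, the gfp parameter is dead and
     * GFP_KERNEL can be hard-coded at the single allocation site. */
    static void *example_coherent_alloc(struct pci_dev *pdev, size_t size,
    				    dma_addr_t *dma_handle)
    {
    	/* May sleep; must not be called from atomic context */
    	return dma_zalloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
    }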
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index fa6d2354a0e9..c56a511b918e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -224,11 +224,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
if (*cqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
if (err)
goto err_put;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e5fb89505a13..436f7689a032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1042,7 +1042,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
if (!context)
return -ENOMEM;
- err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
@@ -1086,7 +1086,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
en_err(priv, "Failed reserving drop qpn\n");
return err;
}
- err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
if (err) {
en_err(priv, "Failed allocating drop qp\n");
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@@ -1158,8 +1158,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
/* Configure RSS indirection qp */
- err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
- GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4f3a9b27ce4a..73faa3d77921 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -111,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
goto err_hwq_res;
}
- err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index e1f9e7cebf8f..5a7816e7c7b4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -251,8 +251,7 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
- gfp_t gfp)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
u32 i = (obj & (table->num_obj - 1)) /
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
@@ -266,7 +265,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
- (table->lowmem ? gfp : GFP_HIGHUSER) |
+ (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
@@ -363,7 +362,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 i;
for (i = start; i <= end; i += inc) {
- err = mlx4_table_get(dev, table, i, GFP_KERNEL);
+ err = mlx4_table_get(dev, table, i);
if (err)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 0c7364550150..dee67fa39107 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,8 +71,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
- gfp_t gfp);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 30616cd0140d..706d7f21ac5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -969,7 +969,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
@@ -977,7 +977,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index ce852ca22a96..24282cd017d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -479,14 +479,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
__mlx4_mpt_release(dev, index);
}
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
- return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
-static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param = 0;
@@ -497,7 +497,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_mpt_alloc_icm(dev, index, gfp);
+ return __mlx4_mpt_alloc_icm(dev, index);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
@@ -629,7 +629,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -787,14 +787,13 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
u64 *page_list;
int err;
int i;
- page_list = kmalloc(buf->npages * sizeof *page_list,
- gfp);
+ page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
@@ -841,7 +840,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 5a310d313e94..26747212526b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -301,29 +301,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
@@ -345,7 +345,7 @@ err_out:
return err;
}
-static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
u64 param = 0;
@@ -355,7 +355,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_qp_alloc_icm(dev, qpn, gfp);
+ return __mlx4_qp_alloc_icm(dev, qpn);
}
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
@@ -397,7 +397,7 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
return qp;
}
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -408,7 +408,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
qp->qpn = qpn;
- err = mlx4_qp_alloc_icm(dev, qpn, gfp);
+ err = mlx4_qp_alloc_icm(dev, qpn);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 812783865205..215e21c3dc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1822,7 +1822,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
if (!fw_reserved(dev, qpn)) {
- err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
+ err = __mlx4_qp_alloc_icm(dev, qpn);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
return err;
@@ -1909,7 +1909,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
+ err = __mlx4_mpt_alloc_icm(dev, mpt->key);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f44d089e2ca6..bedf52126824 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -100,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 746d94e28470..60850bfa3d32 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -766,11 +766,13 @@ static void emac_shutdown(struct platform_device *pdev)
struct emac_adapter *adpt = netdev_priv(netdev);
struct emac_sgmii *sgmii = &adpt->phy;
- /* Closing the SGMII turns off its interrupts */
- sgmii->close(adpt);
+ if (netdev->flags & IFF_UP) {
+ /* Closing the SGMII turns off its interrupts */
+ sgmii->close(adpt);
- /* Resetting the MAC turns off all DMA and its interrupts */
- emac_mac_reset(adpt);
+ /* Resetting the MAC turns off all DMA and its interrupts */
+ emac_mac_reset(adpt);
+ }
}
static struct platform_driver emac_platform_driver = {
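
The guard above only quiesces the hardware when the interface was actually brought up; tearing down SGMII/MAC state that ndo_open never initialized can touch resources that were never set up. A generic sketch of the pattern, names illustrative:

    #include <linux/netdevice.h>

    /* Sketch: skip hardware teardown for an interface that was never opened. */
    static void example_shutdown(struct net_device *ndev)
    {
    	if (!(ndev->flags & IFF_UP))
    		return;		/* nothing was initialized, nothing to stop */

    	/* ... mask interrupts, stop DMA, reset the MAC ... */
    }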
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index b607936e1b3e..9c0488e0f08e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -90,17 +90,13 @@ struct ioc3_private {
spinlock_t ioc3_lock;
struct mii_if_info mii;
+ struct net_device *dev;
struct pci_dev *pdev;
/* Members used by autonegotiation */
struct timer_list ioc3_timer;
};
-static inline struct net_device *priv_netdev(struct ioc3_private *dev)
-{
- return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
-}
-
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -427,7 +423,7 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
nic[i] = nic_read_byte(ioc3);
for (i = 2; i < 8; i++)
- priv_netdev(ip)->dev_addr[i - 2] = nic[i];
+ ip->dev->dev_addr[i - 2] = nic[i];
}
/*
@@ -439,7 +435,7 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
{
ioc3_get_eaddr_nic(ip);
- printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
+ printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
}
static void __ioc3_set_mac_address(struct net_device *dev)
@@ -790,13 +786,12 @@ static void ioc3_timer(unsigned long data)
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- struct net_device *dev = priv_netdev(ip);
int i, found = 0, res = 0;
int ioc3_phy_workaround = 1;
u16 word;
for (i = 0; i < 32; i++) {
- word = ioc3_mdio_read(dev, i, MII_PHYSID1);
+ word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
found = 1;
@@ -1276,6 +1271,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(dev, &pdev->dev);
ip = netdev_priv(dev);
+ ip->dev = dev;
dev->irq = pdev->irq;
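
priv_netdev() recovered the net_device by subtracting a hand-rolled, alignment-dependent offset from the private area, which silently breaks if the allocator's layout ever changes. Storing an explicit back-pointer at probe time is the robust idiom; a sketch with placeholder names:

    #include <linux/netdevice.h>

    struct example_priv {
    	struct net_device *dev;	/* explicit back-pointer, set at probe */
    };

    /* Sketch: netdev_priv() maps net_device -> private area; the reverse
     * mapping is kept as a plain field instead of pointer arithmetic. */
    static void example_init_priv(struct net_device *ndev)
    {
    	struct example_priv *priv = netdev_priv(ndev);

    	priv->dev = ndev;
    }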
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f233bf8b4ebb..c4407e8e39a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -117,7 +117,7 @@ static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
void __iomem *ioaddr = hw->pcsr;
u32 value;
- const struct stmmac_rx_routing route_possibilities[] = {
+ static const struct stmmac_rx_routing route_possibilities[] = {
{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1853f7ff6657..1763e48c84e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4120,8 +4120,15 @@ int stmmac_dvr_probe(struct device *device,
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
- if (priv->plat->stmmac_rst)
+ if (priv->plat->stmmac_rst) {
+ ret = reset_control_assert(priv->plat->stmmac_rst);
reset_control_deassert(priv->plat->stmmac_rst);
+		/* Some reset controllers provide only a reset callback
+		 * instead of the assert + deassert callback pair.
+		 */
+ if (ret == -ENOTSUPP)
+ reset_control_reset(priv->plat->stmmac_rst);
+ }
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
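
The stmmac hunk handles reset controllers that implement only the combined .reset operation: reset_control_assert() then returns -ENOTSUPP and the driver falls back to reset_control_reset(). A condensed sketch of that fallback, where rstc is a placeholder:

    #include <linux/errno.h>
    #include <linux/reset.h>

    /* Sketch: deassert after assert for level-type controllers; fall back
     * to a reset pulse when the controller lacks assert/deassert callbacks. */
    static void example_hw_reset(struct reset_control *rstc)
    {
    	int ret = reset_control_assert(rstc);

    	reset_control_deassert(rstc);
    	if (ret == -ENOTSUPP)
    		reset_control_reset(rstc);
    }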
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 46cb7f8955a2..4bb04aaf9650 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9532,7 +9532,7 @@ static struct niu_parent *niu_get_parent(struct niu *np,
p = niu_new_parent(np, id, ptype);
if (p) {
- char port_name[6];
+ char port_name[8];
int err;
sprintf(port_name, "port%d", port);
@@ -9553,7 +9553,7 @@ static void niu_put_parent(struct niu *np)
{
struct niu_parent *p = np->parent;
u8 port = np->port;
- char port_name[6];
+ char port_name[8];
BUG_ON(!p || p->ports[port] != np);
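
The niu buffer grows from 6 to 8 bytes because "port%d" needs 4 characters for "port", up to 3 digits for a port number below 256, and a NUL terminator; "port10" already overflowed the old 6-byte array. A defensive sketch using snprintf():

    #include <linux/kernel.h>

    /* Sketch: size the buffer for the worst case ("port" + 3 digits + NUL)
     * and let snprintf() truncate rather than overflow if a wider value
     * ever sneaks in. */
    static void example_port_name(unsigned int port)
    {
    	char port_name[8];

    	snprintf(port_name, sizeof(port_name), "port%u", port);
    	/* ... use port_name ... */
    }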
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 711fbbbc4b1f..163d8d16bc24 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -654,6 +654,8 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
RET(-EFAULT);
}
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
+ } else {
+ return -EOPNOTSUPP;
}
if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1850e348f555..badd0a8caeb9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3089,6 +3089,31 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->quirk_irq = true;
}
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ cpsw_split_res(ndev);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "error registering net device\n");
+ ret = -ENODEV;
+ goto clean_ale_ret;
+ }
+
+ if (cpsw->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+ goto clean_unregister_netdev_ret;
+ }
+ }
+
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
* we will not request them.
@@ -3127,33 +3152,9 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- ndev->netdev_ops = &cpsw_netdev_ops;
- ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
- cpsw_split_res(ndev);
-
- /* register the network device */
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(priv->dev, "error registering net device\n");
- ret = -ENODEV;
- goto clean_ale_ret;
- }
-
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
&ss_res->start, ndev->irq, dma_params.descs_pool_size);
- if (cpsw->data.dual_emac) {
- ret = cpsw_probe_dual_emac(priv);
- if (ret) {
- cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_unregister_netdev_ret;
- }
- }
pm_runtime_put(&pdev->dev);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 4daf3d0926a8..0250aa9ae2cb 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -418,6 +418,8 @@ static int ntb_netdev_probe(struct device *client_dev)
if (!ndev)
return -ENOMEM;
+ SET_NETDEV_DEV(ndev, client_dev);
+
dev = netdev_priv(ndev);
dev->ndev = ndev;
dev->pdev = pdev;
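
SET_NETDEV_DEV() ties the netdev to its parent device, so /sys/class/net/<iface>/device exists and udev can apply persistent naming; it must run before register_netdev() so the registration uevent already carries the bus device information. Sketch, with parent as a placeholder:

    #include <linux/netdevice.h>

    /* Sketch: link the netdev to its parent before registering it. */
    static int example_register(struct net_device *ndev, struct device *parent)
    {
    	SET_NETDEV_DEV(ndev, parent);
    	return register_netdev(ndev);
    }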
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 00755b6a42cf..c608e1dfaf09 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -135,8 +135,8 @@ int mdio_mux_init(struct device *dev,
for_each_available_child_of_node(dev->of_node, child_bus_node) {
int v;
- v = of_mdio_parse_addr(dev, child_bus_node);
- if (v < 0) {
+ r = of_property_read_u32(child_bus_node, "reg", &v);
+ if (r) {
dev_err(dev,
"Error: Failed to find reg for child %s\n",
of_node_full_name(child_bus_node));
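
of_mdio_parse_addr() also range-checks the parsed value against PHY_MAX_ADDR; the likely motivation for the change above is that a mux child's "reg" is a bus selector, not a PHY address, so the raw property is read instead. Sketch of the replacement call:

    #include <linux/of.h>

    /* Sketch: read the raw "reg" cell of a child node; returns 0 on
     * success and a negative errno if the property is missing or bad. */
    static int example_child_reg(struct device_node *child, u32 *selector)
    {
    	return of_property_read_u32(child, "reg", selector);
    }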
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 13028833bee3..bd4303944e44 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,6 +120,7 @@ struct ppp {
int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */
+	int __percpu *xmit_recursion; /* xmit recursion detect */
int mru; /* max receive unit 60 */
unsigned int flags; /* control bits 64 */
unsigned int xstate; /* transmit state bits 68 */
@@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
struct ppp *ppp = netdev_priv(dev);
int indx;
int err;
+ int cpu;
ppp->dev = dev;
ppp->ppp_net = src_net;
@@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
INIT_LIST_HEAD(&ppp->channels);
spin_lock_init(&ppp->rlock);
spin_lock_init(&ppp->wlock);
+
+ ppp->xmit_recursion = alloc_percpu(int);
+ if (!ppp->xmit_recursion) {
+ err = -ENOMEM;
+ goto err1;
+ }
+ for_each_possible_cpu(cpu)
+ (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+
#ifdef CONFIG_PPP_MULTILINK
ppp->minseq = -1;
skb_queue_head_init(&ppp->mrq);
@@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
if (err < 0)
- return err;
+ goto err2;
conf->file->private_data = &ppp->file;
return 0;
+err2:
+ free_percpu(ppp->xmit_recursion);
+err1:
+ return err;
}
static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1400,18 +1415,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}
-static DEFINE_PER_CPU(int, ppp_xmit_recursion);
-
static void ppp_xmit_process(struct ppp *ppp)
{
local_bh_disable();
- if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
+ if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
goto err;
- __this_cpu_inc(ppp_xmit_recursion);
+ (*this_cpu_ptr(ppp->xmit_recursion))++;
__ppp_xmit_process(ppp);
- __this_cpu_dec(ppp_xmit_recursion);
+ (*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1905,7 +1918,7 @@ static void __ppp_channel_push(struct channel *pch)
read_lock(&pch->upl);
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp);
read_unlock(&pch->upl);
}
}
@@ -1914,9 +1927,7 @@ static void ppp_channel_push(struct channel *pch)
{
local_bh_disable();
- __this_cpu_inc(ppp_xmit_recursion);
__ppp_channel_push(pch);
- __this_cpu_dec(ppp_xmit_recursion);
local_bh_enable();
}
@@ -3057,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
#endif /* CONFIG_PPP_FILTER */
kfree_skb(ppp->xmit_pending);
+ free_percpu(ppp->xmit_recursion);
free_netdev(ppp->dev);
}
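
The ppp hunks replace a global per-CPU recursion counter with a per-device one, so two stacked ppp units (for instance PPPoE carried over another ppp link) no longer trip each other's detector on the same CPU. A condensed sketch of the per-device guard, assuming an int __percpu counter like the new field:

    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/bottom_half.h>

    /* Sketch: enter/exit pair for a per-device, per-CPU recursion guard.
     * "depth" stands in for the device's percpu counter. */
    static int example_guard_enter(int __percpu *depth)
    {
    	local_bh_disable();
    	if (unlikely(*this_cpu_ptr(depth))) {
    		local_bh_enable();
    		return -EBUSY;	/* already transmitting on this CPU */
    	}
    	(*this_cpu_ptr(depth))++;
    	return 0;
    }

    static void example_guard_exit(int __percpu *depth)
    {
    	(*this_cpu_ptr(depth))--;
    	local_bh_enable();
    }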
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b401ba9e6ddc..811b18215cae 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -768,8 +768,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
+ int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
+ u16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -874,6 +876,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
+ /*
+	 * Some Huawei devices have been observed to come out of reset with the
+	 * NTB format set to NTB32. Check if this is the case, and switch the
+	 * device back to NTB16 format if needed.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &curr_ntb_format, 2);
+ if (err < 0) {
+ goto error2;
+ }
+
+ if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+			dev_info(&intf->dev, "resetting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+
+ if (err < 0)
+ goto error2;
+ }
+ }
+
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65cd5e4..63f28908afda 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
* be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+
+	/* Additionally, it has been reported that some Huawei E3372H devices,
+	 * with firmware version 21.318.01.00.541, come out of reset in NTB32
+	 * format mode and therefore need to be switched back to NTB16 format.
+	 */
+ drvflags |= CDC_NCM_FLAG_RESET_NTB16;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2dfca96a63b6..340c13484e5c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -898,6 +898,7 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.set_wol = smsc95xx_ethtool_set_wol,
.get_link_ksettings = smsc95xx_get_link_ksettings,
.set_link_ksettings = smsc95xx_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index ba1c9f93592b..9c51b8be0038 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -311,7 +311,7 @@ struct vmxnet3_intr {
u8 num_intrs; /* # of intr vectors */
u8 event_intr_idx; /* idx of the intr vector for event */
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
- char event_msi_vector_name[IFNAMSIZ+11];
+ char event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 6e2e760d98b1..0b75def39c6c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5704,7 +5704,7 @@ static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
{
- const u8 glrt_table[] = {
+ static const u8 glrt_table[] = {
0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
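
Adding "static" here matters: a plain const array local to a function is rebuilt on the stack at every call (this sizable table included), while "static const" places it in .rodata once. Minimal illustration:

    /* Sketch: the table lives in .rodata and costs no stack space or
     * per-call initialization, unlike a non-static const local array. */
    static void example_load_table(void)
    {
    	static const unsigned char glrt_like_table[] = { 0xE0, 0x1F, 0x38 };

    	/* ... write table entries to the hardware ... */
    }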
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
index 7116472b4625..a89243c9fdd3 100644
--- a/drivers/ntb/hw/Kconfig
+++ b/drivers/ntb/hw/Kconfig
@@ -1,2 +1,3 @@
source "drivers/ntb/hw/amd/Kconfig"
+source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
index 532e0859b4a1..87332c3905f0 100644
--- a/drivers/ntb/hw/Makefile
+++ b/drivers/ntb/hw/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_NTB_AMD) += amd/
+obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 019a158e1128..f0788aae05c9 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -79,40 +81,42 @@ static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
return 1 << idx;
}
-static int amd_ntb_mw_count(struct ntb_dev *ntb)
+static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
return ntb_ndev(ntb)->mw_count;
}
-static int amd_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
- phys_addr_t *base,
- resource_size_t *size,
- resource_size_t *align,
- resource_size_t *align_size)
+static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
int bar;
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
- if (base)
- *base = pci_resource_start(ndev->ntb.pdev, bar);
-
- if (size)
- *size = pci_resource_len(ndev->ntb.pdev, bar);
+ if (addr_align)
+ *addr_align = SZ_4K;
- if (align)
- *align = SZ_4K;
+ if (size_align)
+ *size_align = 1;
- if (align_size)
- *align_size = 1;
+ if (size_max)
+ *size_max = pci_resource_len(ndev->ntb.pdev, bar);
return 0;
}
-static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
@@ -122,11 +126,14 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
u64 base_addr, limit, reg_val;
int bar;
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
- mw_size = pci_resource_len(ndev->ntb.pdev, bar);
+ mw_size = pci_resource_len(ntb->pdev, bar);
/* make sure the range fits in the usable mw size */
if (size > mw_size)
@@ -135,7 +142,7 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
mmio = ndev->self_mmio;
peer_mmio = ndev->peer_mmio;
- base_addr = pci_resource_start(ndev->ntb.pdev, bar);
+ base_addr = pci_resource_start(ntb->pdev, bar);
if (bar != 1) {
xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
@@ -212,7 +219,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
return 0;
}
-static int amd_ntb_link_is_up(struct ntb_dev *ntb,
+static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed,
enum ntb_width *width)
{
@@ -225,7 +232,7 @@ static int amd_ntb_link_is_up(struct ntb_dev *ntb,
if (width)
*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
- dev_dbg(ndev_dev(ndev), "link is up.\n");
+ dev_dbg(&ntb->pdev->dev, "link is up.\n");
ret = 1;
} else {
@@ -234,7 +241,7 @@ static int amd_ntb_link_is_up(struct ntb_dev *ntb,
if (width)
*width = NTB_WIDTH_NONE;
- dev_dbg(ndev_dev(ndev), "link is down.\n");
+ dev_dbg(&ntb->pdev->dev, "link is down.\n");
}
return ret;
@@ -254,7 +261,7 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb,
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
- dev_dbg(ndev_dev(ndev), "Enabling Link.\n");
+ dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
@@ -275,7 +282,7 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
- dev_dbg(ndev_dev(ndev), "Enabling Link.\n");
+ dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
@@ -284,6 +291,31 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
return 0;
}
+static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ /* The same as for inbound MWs */
+ return ntb_ndev(ntb)->mw_count;
+}
+
+static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+ int bar;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ if (base)
+ *base = pci_resource_start(ndev->ntb.pdev, bar);
+
+ if (size)
+ *size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ return 0;
+}
+
static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->db_valid_mask;
@@ -400,30 +432,30 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
return 0;
}
-static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
u32 offset;
- if (idx < 0 || idx >= ndev->spad_count)
+ if (sidx < 0 || sidx >= ndev->spad_count)
return -EINVAL;
- offset = ndev->peer_spad + (idx << 2);
+ offset = ndev->peer_spad + (sidx << 2);
return readl(mmio + AMD_SPAD_OFFSET + offset);
}
-static int amd_ntb_peer_spad_write(struct ntb_dev *ntb,
- int idx, u32 val)
+static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+ int sidx, u32 val)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
u32 offset;
- if (idx < 0 || idx >= ndev->spad_count)
+ if (sidx < 0 || sidx >= ndev->spad_count)
return -EINVAL;
- offset = ndev->peer_spad + (idx << 2);
+ offset = ndev->peer_spad + (sidx << 2);
writel(val, mmio + AMD_SPAD_OFFSET + offset);
return 0;
@@ -431,8 +463,10 @@ static int amd_ntb_peer_spad_write(struct ntb_dev *ntb,
static const struct ntb_dev_ops amd_ntb_ops = {
.mw_count = amd_ntb_mw_count,
- .mw_get_range = amd_ntb_mw_get_range,
+ .mw_get_align = amd_ntb_mw_get_align,
.mw_set_trans = amd_ntb_mw_set_trans,
+ .peer_mw_count = amd_ntb_peer_mw_count,
+ .peer_mw_get_addr = amd_ntb_peer_mw_get_addr,
.link_is_up = amd_ntb_link_is_up,
.link_enable = amd_ntb_link_enable,
.link_disable = amd_ntb_link_disable,
@@ -466,18 +500,19 @@ static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
void __iomem *mmio = ndev->self_mmio;
+ struct device *dev = &ndev->ntb.pdev->dev;
u32 status;
status = readl(mmio + AMD_INTSTAT_OFFSET);
if (!(status & AMD_EVENT_INTMASK))
return;
- dev_dbg(ndev_dev(ndev), "status = 0x%x and vec = %d\n", status, vec);
+ dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);
status &= AMD_EVENT_INTMASK;
switch (status) {
case AMD_PEER_FLUSH_EVENT:
- dev_info(ndev_dev(ndev), "Flush is done.\n");
+ dev_info(dev, "Flush is done.\n");
break;
case AMD_PEER_RESET_EVENT:
amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
@@ -503,7 +538,7 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
status = readl(mmio + AMD_PMESTAT_OFFSET);
/* check if this is WAKEUP event */
if (status & 0x1)
- dev_info(ndev_dev(ndev), "Wakeup is done.\n");
+ dev_info(dev, "Wakeup is done.\n");
amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
@@ -512,14 +547,14 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
AMD_LINK_HB_TIMEOUT);
break;
default:
- dev_info(ndev_dev(ndev), "event status = 0x%x.\n", status);
+ dev_info(dev, "event status = 0x%x.\n", status);
break;
}
}
static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
{
- dev_dbg(ndev_dev(ndev), "vec %d\n", vec);
+ dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);
if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
amd_handle_event(ndev, vec);
@@ -541,7 +576,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
struct amd_ntb_dev *ndev = dev;
- return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
+ return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
static int ndev_init_isr(struct amd_ntb_dev *ndev,
@@ -550,7 +585,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
struct pci_dev *pdev;
int rc, i, msix_count, node;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
node = dev_to_node(&pdev->dev);
@@ -592,7 +627,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
goto err_msix_request;
}
- dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
+ dev_dbg(&pdev->dev, "Using msix interrupts\n");
ndev->db_count = msix_min;
ndev->msix_vec_count = msix_max;
return 0;
@@ -619,7 +654,7 @@ err_msix_vec_alloc:
if (rc)
goto err_msi_request;
- dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
+ dev_dbg(&pdev->dev, "Using msi interrupts\n");
ndev->db_count = 1;
ndev->msix_vec_count = 1;
return 0;
@@ -636,7 +671,7 @@ err_msi_enable:
if (rc)
goto err_intx_request;
- dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
+ dev_dbg(&pdev->dev, "Using intx interrupts\n");
ndev->db_count = 1;
ndev->msix_vec_count = 1;
return 0;
@@ -651,7 +686,7 @@ static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
void __iomem *mmio = ndev->self_mmio;
int i;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
/* Mask all doorbell interrupts */
ndev->db_mask = ndev->db_valid_mask;
@@ -777,7 +812,8 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
ndev->debugfs_info = NULL;
} else {
ndev->debugfs_dir =
- debugfs_create_dir(ndev_name(ndev), debugfs_dir);
+ debugfs_create_dir(pci_name(ndev->ntb.pdev),
+ debugfs_dir);
if (!ndev->debugfs_dir)
ndev->debugfs_info = NULL;
else
@@ -812,7 +848,7 @@ static int amd_poll_link(struct amd_ntb_dev *ndev)
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
reg &= NTB_LIN_STA_ACTIVE_BIT;
- dev_dbg(ndev_dev(ndev), "%s: reg_val = 0x%x.\n", __func__, reg);
+ dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
if (reg == ndev->cntl_sta)
return 0;
@@ -894,7 +930,8 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev)
break;
default:
- dev_err(ndev_dev(ndev), "AMD NTB does not support B2B mode.\n");
+ dev_err(&ndev->ntb.pdev->dev,
+ "AMD NTB does not support B2B mode.\n");
return -EINVAL;
}
@@ -923,10 +960,10 @@ static int amd_init_dev(struct amd_ntb_dev *ndev)
struct pci_dev *pdev;
int rc = 0;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
ndev->ntb.topo = amd_get_topo(ndev);
- dev_dbg(ndev_dev(ndev), "AMD NTB topo is %s\n",
+ dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
ntb_topo_string(ndev->ntb.topo));
rc = amd_init_ntb(ndev);
@@ -935,7 +972,7 @@ static int amd_init_dev(struct amd_ntb_dev *ndev)
rc = amd_init_isr(ndev);
if (rc) {
- dev_err(ndev_dev(ndev), "fail to init isr.\n");
+ dev_err(&pdev->dev, "fail to init isr.\n");
return rc;
}
@@ -973,7 +1010,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
- dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
+ dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -981,7 +1018,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
- dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
+ dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
ndev->self_mmio = pci_iomap(pdev, 0, 0);
@@ -1004,7 +1041,7 @@ err_pci_enable:
static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
- struct pci_dev *pdev = ndev_pdev(ndev);
+ struct pci_dev *pdev = ndev->ntb.pdev;
pci_iounmap(pdev, ndev->self_mmio);
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 13d73ed94a52..8f3617a46292 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -211,9 +211,6 @@ struct amd_ntb_dev {
struct dentry *debugfs_info;
};
-#define ndev_pdev(ndev) ((ndev)->ntb.pdev)
-#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
-#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
#define ntb_ndev(__ntb) container_of(__ntb, struct amd_ntb_dev, ntb)
#define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work)
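
The AMD hunks above track the multi-port NTB API rework: mw_get_range() is split into mw_get_align() for inbound-window restrictions and peer_mw_get_addr() for the peer-visible BAR, with a peer index (pidx) threaded through. A hedged sketch of how a client might query the reworked API, assuming the include/linux/ntb.h wrappers of this series:

    #include <linux/ntb.h>

    /* Sketch: query inbound-window alignment plus the outbound BAR address;
     * on two-port hardware the only valid peer index is NTB_DEF_PEER_IDX. */
    static int example_query_mw(struct ntb_dev *ntb)
    {
    	resource_size_t addr_align, size_align, size_max, size;
    	phys_addr_t base;
    	int rc;

    	rc = ntb_mw_get_align(ntb, NTB_DEF_PEER_IDX, 0,
    			      &addr_align, &size_align, &size_max);
    	if (rc)
    		return rc;

    	return ntb_peer_mw_get_addr(ntb, 0, &base, &size);
    }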
diff --git a/drivers/ntb/hw/idt/Kconfig b/drivers/ntb/hw/idt/Kconfig
new file mode 100644
index 000000000000..b360e5613b9f
--- /dev/null
+++ b/drivers/ntb/hw/idt/Kconfig
@@ -0,0 +1,31 @@
+config NTB_IDT
+ tristate "IDT PCIe-switch Non-Transparent Bridge support"
+ depends on PCI
+ help
+	 This driver supports the NTB of NT-capable IDT PCIe-switches.
+
+	 Some pre-initialization must be done before an IDT PCIe-switch
+	 exposes its NT-functions correctly. This is done either by properly
+	 initializing the EEPROM connected to the master SMBus of the switch,
+	 or by the BIOS changing the corresponding register values over the
+	 slave-SMBus interface. Either way, it must happen before the PCI
+	 bus enumeration is finished in the Linux kernel.
+
+	 First of all, partitions must be activated and properly assigned to
+	 all the ports whose NT-functions are intended to be activated (see
+	 the SWPARTxCTL and SWPORTxCTL registers). Then all NT-function BARs
+	 must be enabled with a valid aperture chosen. For memory-window
+	 related BARs, the aperture settings determine the maximum size of
+	 the memory windows accepted by a BAR. Note that BAR0 must map the
+	 PCI configuration space registers.
+
+	 It is worth noting that, since part of this driver relies on the
+	 BAR settings of peer NT-functions, the BAR setup cannot be done via
+	 kernel PCI fixups. That is why the alternative pre-initialization
+	 techniques, i.e. BIOS over the SMBus interface or the EEPROM, should
+	 be used. Additionally, if temperature sensor information is to be
+	 printed to the system log, the corresponding registers must be
+	 initialized within the BIOS/EEPROM as well.
+
+ If unsure, say N.
+
diff --git a/drivers/ntb/hw/idt/Makefile b/drivers/ntb/hw/idt/Makefile
new file mode 100644
index 000000000000..a102cf154be0
--- /dev/null
+++ b/drivers/ntb/hw/idt/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_IDT) += ntb_hw_idt.o
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
new file mode 100644
index 000000000000..d44d7ef38fe8
--- /dev/null
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -0,0 +1,2712 @@
+/*
+ * This file is provided under a GPLv2 license. When using or
+ * redistributing this file, you may do so under that license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, one can be found http://www.gnu.org/licenses/.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
+ */
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_idt.h"
+
+#define NTB_NAME "ntb_hw_idt"
+#define NTB_DESC "IDT PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER "2.0"
+#define NTB_IRQNAME "ntb_irq_idt"
+
+MODULE_DESCRIPTION(NTB_DESC);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("T-platforms");
+
+/*
+ * NT Endpoint registers table, simplifying loop access to the functionally
+ * related registers
+ */
+static const struct idt_ntb_regs ntdata_tbl = {
+ { {IDT_NT_BARSETUP0, IDT_NT_BARLIMIT0,
+ IDT_NT_BARLTBASE0, IDT_NT_BARUTBASE0},
+ {IDT_NT_BARSETUP1, IDT_NT_BARLIMIT1,
+ IDT_NT_BARLTBASE1, IDT_NT_BARUTBASE1},
+ {IDT_NT_BARSETUP2, IDT_NT_BARLIMIT2,
+ IDT_NT_BARLTBASE2, IDT_NT_BARUTBASE2},
+ {IDT_NT_BARSETUP3, IDT_NT_BARLIMIT3,
+ IDT_NT_BARLTBASE3, IDT_NT_BARUTBASE3},
+ {IDT_NT_BARSETUP4, IDT_NT_BARLIMIT4,
+ IDT_NT_BARLTBASE4, IDT_NT_BARUTBASE4},
+ {IDT_NT_BARSETUP5, IDT_NT_BARLIMIT5,
+ IDT_NT_BARLTBASE5, IDT_NT_BARUTBASE5} },
+ { {IDT_NT_INMSG0, IDT_NT_OUTMSG0, IDT_NT_INMSGSRC0},
+ {IDT_NT_INMSG1, IDT_NT_OUTMSG1, IDT_NT_INMSGSRC1},
+ {IDT_NT_INMSG2, IDT_NT_OUTMSG2, IDT_NT_INMSGSRC2},
+ {IDT_NT_INMSG3, IDT_NT_OUTMSG3, IDT_NT_INMSGSRC3} }
+};
+
+/*
+ * NT Endpoint ports data table with the corresponding PCIe command, link
+ * status, control and BAR-related registers
+ */
+static const struct idt_ntb_port portdata_tbl[IDT_MAX_NR_PORTS] = {
+/*0*/ { IDT_SW_NTP0_PCIECMDSTS, IDT_SW_NTP0_PCIELCTLSTS,
+ IDT_SW_NTP0_NTCTL,
+ IDT_SW_SWPORT0CTL, IDT_SW_SWPORT0STS,
+ { {IDT_SW_NTP0_BARSETUP0, IDT_SW_NTP0_BARLIMIT0,
+ IDT_SW_NTP0_BARLTBASE0, IDT_SW_NTP0_BARUTBASE0},
+ {IDT_SW_NTP0_BARSETUP1, IDT_SW_NTP0_BARLIMIT1,
+ IDT_SW_NTP0_BARLTBASE1, IDT_SW_NTP0_BARUTBASE1},
+ {IDT_SW_NTP0_BARSETUP2, IDT_SW_NTP0_BARLIMIT2,
+ IDT_SW_NTP0_BARLTBASE2, IDT_SW_NTP0_BARUTBASE2},
+ {IDT_SW_NTP0_BARSETUP3, IDT_SW_NTP0_BARLIMIT3,
+ IDT_SW_NTP0_BARLTBASE3, IDT_SW_NTP0_BARUTBASE3},
+ {IDT_SW_NTP0_BARSETUP4, IDT_SW_NTP0_BARLIMIT4,
+ IDT_SW_NTP0_BARLTBASE4, IDT_SW_NTP0_BARUTBASE4},
+ {IDT_SW_NTP0_BARSETUP5, IDT_SW_NTP0_BARLIMIT5,
+ IDT_SW_NTP0_BARLTBASE5, IDT_SW_NTP0_BARUTBASE5} } },
+/*1*/ {0},
+/*2*/ { IDT_SW_NTP2_PCIECMDSTS, IDT_SW_NTP2_PCIELCTLSTS,
+ IDT_SW_NTP2_NTCTL,
+ IDT_SW_SWPORT2CTL, IDT_SW_SWPORT2STS,
+ { {IDT_SW_NTP2_BARSETUP0, IDT_SW_NTP2_BARLIMIT0,
+ IDT_SW_NTP2_BARLTBASE0, IDT_SW_NTP2_BARUTBASE0},
+ {IDT_SW_NTP2_BARSETUP1, IDT_SW_NTP2_BARLIMIT1,
+ IDT_SW_NTP2_BARLTBASE1, IDT_SW_NTP2_BARUTBASE1},
+ {IDT_SW_NTP2_BARSETUP2, IDT_SW_NTP2_BARLIMIT2,
+ IDT_SW_NTP2_BARLTBASE2, IDT_SW_NTP2_BARUTBASE2},
+ {IDT_SW_NTP2_BARSETUP3, IDT_SW_NTP2_BARLIMIT3,
+ IDT_SW_NTP2_BARLTBASE3, IDT_SW_NTP2_BARUTBASE3},
+ {IDT_SW_NTP2_BARSETUP4, IDT_SW_NTP2_BARLIMIT4,
+ IDT_SW_NTP2_BARLTBASE4, IDT_SW_NTP2_BARUTBASE4},
+ {IDT_SW_NTP2_BARSETUP5, IDT_SW_NTP2_BARLIMIT5,
+ IDT_SW_NTP2_BARLTBASE5, IDT_SW_NTP2_BARUTBASE5} } },
+/*3*/ {0},
+/*4*/ { IDT_SW_NTP4_PCIECMDSTS, IDT_SW_NTP4_PCIELCTLSTS,
+ IDT_SW_NTP4_NTCTL,
+ IDT_SW_SWPORT4CTL, IDT_SW_SWPORT4STS,
+ { {IDT_SW_NTP4_BARSETUP0, IDT_SW_NTP4_BARLIMIT0,
+ IDT_SW_NTP4_BARLTBASE0, IDT_SW_NTP4_BARUTBASE0},
+ {IDT_SW_NTP4_BARSETUP1, IDT_SW_NTP4_BARLIMIT1,
+ IDT_SW_NTP4_BARLTBASE1, IDT_SW_NTP4_BARUTBASE1},
+ {IDT_SW_NTP4_BARSETUP2, IDT_SW_NTP4_BARLIMIT2,
+ IDT_SW_NTP4_BARLTBASE2, IDT_SW_NTP4_BARUTBASE2},
+ {IDT_SW_NTP4_BARSETUP3, IDT_SW_NTP4_BARLIMIT3,
+ IDT_SW_NTP4_BARLTBASE3, IDT_SW_NTP4_BARUTBASE3},
+ {IDT_SW_NTP4_BARSETUP4, IDT_SW_NTP4_BARLIMIT4,
+ IDT_SW_NTP4_BARLTBASE4, IDT_SW_NTP4_BARUTBASE4},
+ {IDT_SW_NTP4_BARSETUP5, IDT_SW_NTP4_BARLIMIT5,
+ IDT_SW_NTP4_BARLTBASE5, IDT_SW_NTP4_BARUTBASE5} } },
+/*5*/ {0},
+/*6*/ { IDT_SW_NTP6_PCIECMDSTS, IDT_SW_NTP6_PCIELCTLSTS,
+ IDT_SW_NTP6_NTCTL,
+ IDT_SW_SWPORT6CTL, IDT_SW_SWPORT6STS,
+ { {IDT_SW_NTP6_BARSETUP0, IDT_SW_NTP6_BARLIMIT0,
+ IDT_SW_NTP6_BARLTBASE0, IDT_SW_NTP6_BARUTBASE0},
+ {IDT_SW_NTP6_BARSETUP1, IDT_SW_NTP6_BARLIMIT1,
+ IDT_SW_NTP6_BARLTBASE1, IDT_SW_NTP6_BARUTBASE1},
+ {IDT_SW_NTP6_BARSETUP2, IDT_SW_NTP6_BARLIMIT2,
+ IDT_SW_NTP6_BARLTBASE2, IDT_SW_NTP6_BARUTBASE2},
+ {IDT_SW_NTP6_BARSETUP3, IDT_SW_NTP6_BARLIMIT3,
+ IDT_SW_NTP6_BARLTBASE3, IDT_SW_NTP6_BARUTBASE3},
+ {IDT_SW_NTP6_BARSETUP4, IDT_SW_NTP6_BARLIMIT4,
+ IDT_SW_NTP6_BARLTBASE4, IDT_SW_NTP6_BARUTBASE4},
+ {IDT_SW_NTP6_BARSETUP5, IDT_SW_NTP6_BARLIMIT5,
+ IDT_SW_NTP6_BARLTBASE5, IDT_SW_NTP6_BARUTBASE5} } },
+/*7*/ {0},
+/*8*/ { IDT_SW_NTP8_PCIECMDSTS, IDT_SW_NTP8_PCIELCTLSTS,
+ IDT_SW_NTP8_NTCTL,
+ IDT_SW_SWPORT8CTL, IDT_SW_SWPORT8STS,
+ { {IDT_SW_NTP8_BARSETUP0, IDT_SW_NTP8_BARLIMIT0,
+ IDT_SW_NTP8_BARLTBASE0, IDT_SW_NTP8_BARUTBASE0},
+ {IDT_SW_NTP8_BARSETUP1, IDT_SW_NTP8_BARLIMIT1,
+ IDT_SW_NTP8_BARLTBASE1, IDT_SW_NTP8_BARUTBASE1},
+ {IDT_SW_NTP8_BARSETUP2, IDT_SW_NTP8_BARLIMIT2,
+ IDT_SW_NTP8_BARLTBASE2, IDT_SW_NTP8_BARUTBASE2},
+ {IDT_SW_NTP8_BARSETUP3, IDT_SW_NTP8_BARLIMIT3,
+ IDT_SW_NTP8_BARLTBASE3, IDT_SW_NTP8_BARUTBASE3},
+ {IDT_SW_NTP8_BARSETUP4, IDT_SW_NTP8_BARLIMIT4,
+ IDT_SW_NTP8_BARLTBASE4, IDT_SW_NTP8_BARUTBASE4},
+ {IDT_SW_NTP8_BARSETUP5, IDT_SW_NTP8_BARLIMIT5,
+ IDT_SW_NTP8_BARLTBASE5, IDT_SW_NTP8_BARUTBASE5} } },
+/*9*/ {0},
+/*10*/ {0},
+/*11*/ {0},
+/*12*/ { IDT_SW_NTP12_PCIECMDSTS, IDT_SW_NTP12_PCIELCTLSTS,
+ IDT_SW_NTP12_NTCTL,
+ IDT_SW_SWPORT12CTL, IDT_SW_SWPORT12STS,
+ { {IDT_SW_NTP12_BARSETUP0, IDT_SW_NTP12_BARLIMIT0,
+ IDT_SW_NTP12_BARLTBASE0, IDT_SW_NTP12_BARUTBASE0},
+ {IDT_SW_NTP12_BARSETUP1, IDT_SW_NTP12_BARLIMIT1,
+ IDT_SW_NTP12_BARLTBASE1, IDT_SW_NTP12_BARUTBASE1},
+ {IDT_SW_NTP12_BARSETUP2, IDT_SW_NTP12_BARLIMIT2,
+ IDT_SW_NTP12_BARLTBASE2, IDT_SW_NTP12_BARUTBASE2},
+ {IDT_SW_NTP12_BARSETUP3, IDT_SW_NTP12_BARLIMIT3,
+ IDT_SW_NTP12_BARLTBASE3, IDT_SW_NTP12_BARUTBASE3},
+ {IDT_SW_NTP12_BARSETUP4, IDT_SW_NTP12_BARLIMIT4,
+ IDT_SW_NTP12_BARLTBASE4, IDT_SW_NTP12_BARUTBASE4},
+ {IDT_SW_NTP12_BARSETUP5, IDT_SW_NTP12_BARLIMIT5,
+ IDT_SW_NTP12_BARLTBASE5, IDT_SW_NTP12_BARUTBASE5} } },
+/*13*/ {0},
+/*14*/ {0},
+/*15*/ {0},
+/*16*/ { IDT_SW_NTP16_PCIECMDSTS, IDT_SW_NTP16_PCIELCTLSTS,
+ IDT_SW_NTP16_NTCTL,
+ IDT_SW_SWPORT16CTL, IDT_SW_SWPORT16STS,
+ { {IDT_SW_NTP16_BARSETUP0, IDT_SW_NTP16_BARLIMIT0,
+ IDT_SW_NTP16_BARLTBASE0, IDT_SW_NTP16_BARUTBASE0},
+ {IDT_SW_NTP16_BARSETUP1, IDT_SW_NTP16_BARLIMIT1,
+ IDT_SW_NTP16_BARLTBASE1, IDT_SW_NTP16_BARUTBASE1},
+ {IDT_SW_NTP16_BARSETUP2, IDT_SW_NTP16_BARLIMIT2,
+ IDT_SW_NTP16_BARLTBASE2, IDT_SW_NTP16_BARUTBASE2},
+ {IDT_SW_NTP16_BARSETUP3, IDT_SW_NTP16_BARLIMIT3,
+ IDT_SW_NTP16_BARLTBASE3, IDT_SW_NTP16_BARUTBASE3},
+ {IDT_SW_NTP16_BARSETUP4, IDT_SW_NTP16_BARLIMIT4,
+ IDT_SW_NTP16_BARLTBASE4, IDT_SW_NTP16_BARUTBASE4},
+ {IDT_SW_NTP16_BARSETUP5, IDT_SW_NTP16_BARLIMIT5,
+ IDT_SW_NTP16_BARLTBASE5, IDT_SW_NTP16_BARUTBASE5} } },
+/*17*/ {0},
+/*18*/ {0},
+/*19*/ {0},
+/*20*/ { IDT_SW_NTP20_PCIECMDSTS, IDT_SW_NTP20_PCIELCTLSTS,
+ IDT_SW_NTP20_NTCTL,
+ IDT_SW_SWPORT20CTL, IDT_SW_SWPORT20STS,
+ { {IDT_SW_NTP20_BARSETUP0, IDT_SW_NTP20_BARLIMIT0,
+ IDT_SW_NTP20_BARLTBASE0, IDT_SW_NTP20_BARUTBASE0},
+ {IDT_SW_NTP20_BARSETUP1, IDT_SW_NTP20_BARLIMIT1,
+ IDT_SW_NTP20_BARLTBASE1, IDT_SW_NTP20_BARUTBASE1},
+ {IDT_SW_NTP20_BARSETUP2, IDT_SW_NTP20_BARLIMIT2,
+ IDT_SW_NTP20_BARLTBASE2, IDT_SW_NTP20_BARUTBASE2},
+ {IDT_SW_NTP20_BARSETUP3, IDT_SW_NTP20_BARLIMIT3,
+ IDT_SW_NTP20_BARLTBASE3, IDT_SW_NTP20_BARUTBASE3},
+ {IDT_SW_NTP20_BARSETUP4, IDT_SW_NTP20_BARLIMIT4,
+ IDT_SW_NTP20_BARLTBASE4, IDT_SW_NTP20_BARUTBASE4},
+ {IDT_SW_NTP20_BARSETUP5, IDT_SW_NTP20_BARLIMIT5,
+ IDT_SW_NTP20_BARLTBASE5, IDT_SW_NTP20_BARUTBASE5} } },
+/*21*/ {0},
+/*22*/ {0},
+/*23*/ {0}
+};
+
+/*
+ * IDT PCIe-switch partitions table with the corresponding control, status
+ * and messages control registers
+ */
+static const struct idt_ntb_part partdata_tbl[IDT_MAX_NR_PARTS] = {
+/*0*/ { IDT_SW_SWPART0CTL, IDT_SW_SWPART0STS,
+ {IDT_SW_SWP0MSGCTL0, IDT_SW_SWP0MSGCTL1,
+ IDT_SW_SWP0MSGCTL2, IDT_SW_SWP0MSGCTL3} },
+/*1*/ { IDT_SW_SWPART1CTL, IDT_SW_SWPART1STS,
+ {IDT_SW_SWP1MSGCTL0, IDT_SW_SWP1MSGCTL1,
+ IDT_SW_SWP1MSGCTL2, IDT_SW_SWP1MSGCTL3} },
+/*2*/ { IDT_SW_SWPART2CTL, IDT_SW_SWPART2STS,
+ {IDT_SW_SWP2MSGCTL0, IDT_SW_SWP2MSGCTL1,
+ IDT_SW_SWP2MSGCTL2, IDT_SW_SWP2MSGCTL3} },
+/*3*/ { IDT_SW_SWPART3CTL, IDT_SW_SWPART3STS,
+ {IDT_SW_SWP3MSGCTL0, IDT_SW_SWP3MSGCTL1,
+ IDT_SW_SWP3MSGCTL2, IDT_SW_SWP3MSGCTL3} },
+/*4*/ { IDT_SW_SWPART4CTL, IDT_SW_SWPART4STS,
+ {IDT_SW_SWP4MSGCTL0, IDT_SW_SWP4MSGCTL1,
+ IDT_SW_SWP4MSGCTL2, IDT_SW_SWP4MSGCTL3} },
+/*5*/ { IDT_SW_SWPART5CTL, IDT_SW_SWPART5STS,
+ {IDT_SW_SWP5MSGCTL0, IDT_SW_SWP5MSGCTL1,
+ IDT_SW_SWP5MSGCTL2, IDT_SW_SWP5MSGCTL3} },
+/*6*/ { IDT_SW_SWPART6CTL, IDT_SW_SWPART6STS,
+ {IDT_SW_SWP6MSGCTL0, IDT_SW_SWP6MSGCTL1,
+ IDT_SW_SWP6MSGCTL2, IDT_SW_SWP6MSGCTL3} },
+/*7*/ { IDT_SW_SWPART7CTL, IDT_SW_SWPART7STS,
+ {IDT_SW_SWP7MSGCTL0, IDT_SW_SWP7MSGCTL1,
+ IDT_SW_SWP7MSGCTL2, IDT_SW_SWP7MSGCTL3} }
+};
+
+/*
+ * DebugFS directory to place the driver debug file
+ */
+static struct dentry *dbgfs_topdir;
+
+/*=============================================================================
+ * 1. IDT PCIe-switch registers IO-functions
+ *
+ * Besides the ordinary configuration space registers, the IDT PCIe-switch
+ * exposes global configuration registers, which are used to determine the
+ * state of other device ports as well as to get notified of switch-related
+ * events. Additionally, the configuration space registers of all the IDT
+ * PCIe-switch functions are mapped to the Global Address space, so each
+ * function can determine the configuration of any other PCI function.
+ * The functions declared in this chapter encapsulate access to the
+ * configuration and global registers, so the driver code just needs to
+ * provide the IDT NTB hardware descriptor and a register address.
+ *=============================================================================
+ */
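+
+/*
+ * For instance, reading a global register boils down to the following
+ * indirect access sequence (a simplified sketch of what idt_sw_read()
+ * below performs, with the locking and sanity checks omitted):
+ *
+ *	iowrite32(reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+ *	data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+ */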
+
+/*
+ * idt_nt_write() - PCI configuration space registers write method
+ * @ndev: IDT NTB hardware driver descriptor
+ * @reg: Register to write data to
+ * @data: Value to write to the register
+ *
+ * IDT PCIe-switch registers are all little-endian.
+ */
+static void idt_nt_write(struct idt_ntb_dev *ndev,
+ const unsigned int reg, const u32 data)
+{
+ /*
+ * It's an obvious bug to request a register exceeding the maximum
+ * possible address as well as to pass an unaligned offset.
+ */
+ if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+ return;
+
+ /* Just write the value to the specified register */
+ iowrite32(data, ndev->cfgspc + (ptrdiff_t)reg);
+}
+
+/*
+ * idt_nt_read() - PCI configuration space registers read method
+ * @ndev: IDT NTB hardware driver descriptor
+ * @reg: Register to read data from
+ *
+ * IDT PCIe-switch registers are all little-endian.
+ *
+ * Return: register value
+ */
+static u32 idt_nt_read(struct idt_ntb_dev *ndev, const unsigned int reg)
+{
+ /*
+ * It's an obvious bug to request a register exceeding the maximum
+ * possible address as well as to pass an unaligned offset.
+ */
+ if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+ return ~0;
+
+ /* Just read the value from the specified register */
+ return ioread32(ndev->cfgspc + (ptrdiff_t)reg);
+}
+
+/*
+ * idt_sw_write() - Global registers write method
+ * @ndev: IDT NTB hardware driver descriptor
+ * @reg: Register to write data to
+ * @data: Value to write to the register
+ *
+ * IDT PCIe-switch Global configuration registers are all little-endian.
+ */
+static void idt_sw_write(struct idt_ntb_dev *ndev,
+ const unsigned int reg, const u32 data)
+{
+ unsigned long irqflags;
+
+ /*
+ * It's an obvious bug to request a register exceeding the maximum
+ * possible address as well as to pass an unaligned offset.
+ */
+ if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+ return;
+
+ /* Lock GASA registers operations */
+ spin_lock_irqsave(&ndev->gasa_lock, irqflags);
+ /* Set the global register address */
+ iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+ /* Put the new value of the register */
+ iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+ /* Make sure the PCIe transactions are executed */
+ mmiowb();
+ /* Unlock GASA registers operations */
+ spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
+}
+
+/*
+ * idt_sw_read() - Global registers read method
+ * @ndev: IDT NTB hardware driver descriptor
+ * @reg: Register to read data from
+ *
+ * IDT PCIe-switch Global configuration registers are all little-endian.
+ *
+ * Return: register value
+ */
+static u32 idt_sw_read(struct idt_ntb_dev *ndev, const unsigned int reg)
+{
+ unsigned long irqflags;
+ u32 data;
+
+ /*
+ * It's an obvious bug to request a register exceeding the maximum
+ * possible address as well as to pass an unaligned offset.
+ */
+ if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+ return ~0;
+
+ /* Lock GASA registers operations */
+ spin_lock_irqsave(&ndev->gasa_lock, irqflags);
+ /* Set the global register address */
+ iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+ /* Get the data of the register (the read op acts as an MMIO barrier) */
+ data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+ /* Unlock GASA registers operations */
+ spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
+
+ return data;
+}
+
+/*
+ * idt_reg_set_bits() - set bits of a passed register
+ * @ndev: IDT NTB hardware driver descriptor
+ * @reg: Register to change bits of
+ * @reg_lock: Register access spin lock
+ * @valid_mask: Mask of valid bits
+ * @set_bits: Bitmask to set
+ *
+ * Helper method to check whether a passed bitfield is valid and set
+ * corresponding bits of a register.
+ *
+ * WARNING! Make sure the passed register isn't accessed concurrently via
+ * the plain idt_nt_write() method (the read method is safe to use
+ * concurrently).
+ *
+ * Return: zero on success, negative error on invalid bitmask.
+ */
+static inline int idt_reg_set_bits(struct idt_ntb_dev *ndev, unsigned int reg,
+ spinlock_t *reg_lock,
+ u64 valid_mask, u64 set_bits)
+{
+ unsigned long irqflags;
+ u32 data;
+
+ if (set_bits & ~(u64)valid_mask)
+ return -EINVAL;
+
+ /* Lock access to the register until the change is written back */
+ spin_lock_irqsave(reg_lock, irqflags);
+ data = idt_nt_read(ndev, reg) | (u32)set_bits;
+ idt_nt_write(ndev, reg, data);
+ /* Unlock the register */
+ spin_unlock_irqrestore(reg_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * idt_reg_clear_bits() - clear bits of a passed register
+ * @ndev: IDT NTB hardware driver descriptor
+ * @reg: Register to change bits of
+ * @reg_lock: Register access spin lock
+ * @clear_bits: Bitmask to clear
+ *
+ * Helper method to check whether a passed bitfield is valid and clear
+ * corresponding bits of a register.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * WARNING! Make sure the passed register isn't accessed concurrently via
+ * the plain idt_nt_write() method (the read method is safe to use
+ * concurrently).
+ */
+static inline void idt_reg_clear_bits(struct idt_ntb_dev *ndev,
+ unsigned int reg, spinlock_t *reg_lock,
+ u64 clear_bits)
+{
+ unsigned long irqflags;
+ u32 data;
+
+ /* Lock access to the register until the change is written back */
+ spin_lock_irqsave(reg_lock, irqflags);
+ data = idt_nt_read(ndev, reg) & ~(u32)clear_bits;
+ idt_nt_write(ndev, reg, data);
+ /* Unlock the register */
+ spin_unlock_irqrestore(reg_lock, irqflags);
+}
+
+/*===========================================================================
+ * 2. Ports operations
+ *
+ * IDT PCIe-switches can have from 3 up to 8 ports, each of which may have
+ * an NT-function enabled. All the possible ports therefore need to be
+ * scanned for activated NT-functions; the NTB API enumerates only the
+ * ports with NTB enabled.
+ *===========================================================================
+ */
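+
+/*
+ * Once the scan is done, the detected peers become visible to client
+ * drivers over the generic multi-port NTB API, roughly like:
+ *
+ *	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++)
+ *		port = ntb_peer_port_number(ntb, pidx);
+ */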
+
+/*
+ * idt_scan_ports() - scan IDT PCIe-switch ports collecting info in the tables
+ * @ndev: Pointer to the PCI device descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_scan_ports(struct idt_ntb_dev *ndev)
+{
+ unsigned char pidx, port, part;
+ u32 data, portsts, partsts;
+
+ /* Retrieve the local port number */
+ data = idt_nt_read(ndev, IDT_NT_PCIELCAP);
+ ndev->port = GET_FIELD(PCIELCAP_PORTNUM, data);
+
+ /* Retrieve the local partition number */
+ portsts = idt_sw_read(ndev, portdata_tbl[ndev->port].sts);
+ ndev->part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+
+ /* Initialize port/partition -> index tables with invalid values */
+ memset(ndev->port_idx_map, -EINVAL, sizeof(ndev->port_idx_map));
+ memset(ndev->part_idx_map, -EINVAL, sizeof(ndev->part_idx_map));
+
+ /*
+ * Walk over all the possible ports checking whether any of them has
+ * NT-function activated
+ */
+ ndev->peer_cnt = 0;
+ for (pidx = 0; pidx < ndev->swcfg->port_cnt; pidx++) {
+ port = ndev->swcfg->ports[pidx];
+ /* Skip local port */
+ if (port == ndev->port)
+ continue;
+
+ /* Read the port status register to get its partition */
+ portsts = idt_sw_read(ndev, portdata_tbl[port].sts);
+ part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+
+ /* Retrieve the partition status */
+ partsts = idt_sw_read(ndev, partdata_tbl[part].sts);
+ /* Check if partition state is active and port has NTB */
+ if (IS_FLD_SET(SWPARTxSTS_STATE, partsts, ACT) &&
+ (IS_FLD_SET(SWPORTxSTS_MODE, portsts, NT) ||
+ IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNT) ||
+ IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNTDMA) ||
+ IS_FLD_SET(SWPORTxSTS_MODE, portsts, NTDMA))) {
+ /* Save the port and partition numbers */
+ ndev->peers[ndev->peer_cnt].port = port;
+ ndev->peers[ndev->peer_cnt].part = part;
+ /* Fill in the port/partition -> index tables */
+ ndev->port_idx_map[port] = ndev->peer_cnt;
+ ndev->part_idx_map[part] = ndev->peer_cnt;
+ ndev->peer_cnt++;
+ }
+ }
+
+ dev_dbg(&ndev->ntb.pdev->dev, "Local port: %hhu, num of peers: %hhu\n",
+ ndev->port, ndev->peer_cnt);
+
+ /* It's useless to have this driver loaded if there are no peers */
+ if (ndev->peer_cnt == 0) {
+ dev_warn(&ndev->ntb.pdev->dev, "No active peer found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_ntb_port_number() - get the local port number
+ * @ntb: NTB device context.
+ *
+ * Return: the local port number
+ */
+static int idt_ntb_port_number(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return ndev->port;
+}
+
+/*
+ * idt_ntb_peer_port_count() - get the number of peer ports
+ * @ntb: NTB device context.
+ *
+ * Return the count of detected peer NT-functions.
+ *
+ * Return: number of peer ports
+ */
+static int idt_ntb_peer_port_count(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return ndev->peer_cnt;
+}
+
+/*
+ * idt_ntb_peer_port_number() - get peer port by given index
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * Return: peer port or negative error
+ */
+static int idt_ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ if (pidx < 0 || ndev->peer_cnt <= pidx)
+ return -EINVAL;
+
+ /* Return the detected NT-function port number */
+ return ndev->peers[pidx].port;
+}
+
+/*
+ * idt_ntb_peer_port_idx() - get peer port index by given port number
+ * @ntb: NTB device context.
+ * @port: Peer port number.
+ *
+ * The internal port -> index table is pre-initialized with -EINVAL values,
+ * so we just need to return its value.
+ *
+ * Return: peer NT-function port index or negative error
+ */
+static int idt_ntb_peer_port_idx(struct ntb_dev *ntb, int port)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ if (port < 0 || IDT_MAX_NR_PORTS <= port)
+ return -EINVAL;
+
+ return ndev->port_idx_map[port];
+}
+
+/*===========================================================================
+ * 3. Link status operations
+ * There is no ready-to-use method to have the peer ports notified when the
+ * NTB link is brought up or down, so a global signal is used instead.
+ * Whenever a port changes its local NTB link state, it sends the global
+ * signal and clears the corresponding global state bit. All the other ports
+ * then receive a notification of that, which makes the client drivers aware
+ * of a possible NTB link change.
+ * Additionally each of the active NT-functions is subscribed to PCIe-link
+ * state changes of the peer ports.
+ *===========================================================================
+ */
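+
+/*
+ * A rough client-side usage sketch over the generic NTB API (the speed and
+ * width arguments are ignored by this driver, as noted at
+ * idt_ntb_link_enable() below):
+ *
+ *	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ *	// ... wait for the link_event callback, then ...
+ *	if (ntb_link_is_up(ntb, &speed, &width) & BIT(pidx))
+ *		; // NTB link to the peer with index pidx is up
+ */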
+
+static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev);
+
+/*
+ * idt_init_link() - Initialize NTB link state notification subsystem
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Function performs the basic initialization of some global registers
+ * needed to enable IRQ-based notifications of PCIe Link Up/Down and
+ * Global Signal events.
+ * NOTE Since it's not possible to determine when all the NTB peer drivers
+ * are unloaded, and since these registers may be accessed concurrently, we
+ * must preinitialize them with the same value and leave them uncleared on
+ * local driver unload.
+ */
+static void idt_init_link(struct idt_ntb_dev *ndev)
+{
+ u32 part_mask, port_mask, se_mask;
+ unsigned char pidx;
+
+ /* Initialize spin locker of Mapping Table access registers */
+ spin_lock_init(&ndev->mtbl_lock);
+
+ /* Walk over all detected peers collecting port and partition masks */
+ port_mask = ~BIT(ndev->port);
+ part_mask = ~BIT(ndev->part);
+ for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+ port_mask &= ~BIT(ndev->peers[pidx].port);
+ part_mask &= ~BIT(ndev->peers[pidx].part);
+ }
+
+ /* Clean the Link Up/Down and Global Signal status registers */
+ idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
+ idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
+ idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
+
+ /* Unmask NT-activated partitions to receive Global Switch events */
+ idt_sw_write(ndev, IDT_SW_SEPMSK, part_mask);
+
+ /* Enable PCIe Link Up events of NT-activated ports */
+ idt_sw_write(ndev, IDT_SW_SELINKUPMSK, port_mask);
+
+ /* Enable PCIe Link Down events of NT-activated ports */
+ idt_sw_write(ndev, IDT_SW_SELINKDNMSK, port_mask);
+
+ /* Unmask NT-activated partitions to receive Global Signal events */
+ idt_sw_write(ndev, IDT_SW_SEGSIGMSK, part_mask);
+
+ /* Unmask Link Up/Down and Global Switch Events */
+ se_mask = ~(IDT_SEMSK_LINKUP | IDT_SEMSK_LINKDN | IDT_SEMSK_GSIGNAL);
+ idt_sw_write(ndev, IDT_SW_SEMSK, se_mask);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events initialized");
+}
+
+/*
+ * idt_deinit_link() - deinitialize link subsystem
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Just disable the link back.
+ */
+static void idt_deinit_link(struct idt_ntb_dev *ndev)
+{
+ /* Disable the link */
+ idt_ntb_local_link_disable(ndev);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events deinitialized");
+}
+
+/*
+ * idt_se_isr() - switch events ISR
+ * @ndev: IDT NTB hardware driver descriptor
+ * @ntint_sts: NT-function interrupt status
+ *
+ * This driver doesn't support IDT PCIe-switch dynamic reconfigurations,
+ * Failover capability, etc., so switch events are utilized to notify of
+ * PCIe and NTB link events.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_se_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+ u32 sests;
+
+ /* Read Switch Events status */
+ sests = idt_sw_read(ndev, IDT_SW_SESTS);
+
+ /* Clean the Link Up/Down and Global Signal status registers */
+ idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
+ idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
+ idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
+
+ /* Clean the corresponding interrupt bit */
+ idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_SEVENT);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "SE IRQ detected %#08x (SESTS %#08x)",
+ ntint_sts, sests);
+
+ /* Notify the client driver of possible link state change */
+ ntb_link_event(&ndev->ntb);
+}
+
+/*
+ * idt_ntb_local_link_enable() - enable the local NTB link.
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * In order to enable the NTB link we need:
+ * - enable Completion TLPs translation
+ * - initialize mapping table to enable the Request ID translation
+ * - notify peers of NTB link state change
+ */
+static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev)
+{
+ u32 reqid, mtbldata = 0;
+ unsigned long irqflags;
+
+ /* Enable the ID protection and Completion TLPs translation */
+ idt_nt_write(ndev, IDT_NT_NTCTL, IDT_NTCTL_CPEN);
+
+ /* Retrieve the current Requester ID (Bus:Device:Function) */
+ reqid = idt_nt_read(ndev, IDT_NT_REQIDCAP);
+
+ /*
+ * Set the NT Mapping table entry corresponding to the port partition
+ * index with the data needed to perform the Request ID translation
+ */
+ mtbldata = SET_FIELD(NTMTBLDATA_REQID, 0, reqid) |
+ SET_FIELD(NTMTBLDATA_PART, 0, ndev->part) |
+ IDT_NTMTBLDATA_VALID;
+ spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+ idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
+ mmiowb();
+ spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+ /* Notify the peers by setting and clearing the global signal bit */
+ idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
+ idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
+}
+
+/*
+ * idt_ntb_local_link_disable() - disable the local NTB link.
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * In order to disable the NTB link we need:
+ * - disable Completion TLPs translation
+ * - clear corresponding mapping table entry
+ * - notify peers of NTB link state change
+ */
+static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
+{
+ unsigned long irqflags;
+
+ /* Disable Completion TLPs translation */
+ idt_nt_write(ndev, IDT_NT_NTCTL, 0);
+
+ /* Clear the corresponding NT Mapping table entry */
+ spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+ idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
+ mmiowb();
+ spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+ /* Notify the peers by setting and clearing the global signal bit */
+ idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
+ idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
+}
+
+/*
+ * idt_ntb_local_link_is_up() - test whether the local NTB link is up
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Local link is up under the following conditions:
+ * - Bus mastering is enabled
+ * - NTCTL has Completion TLPs translation enabled
+ * - Mapping table permits Request TLPs translation
+ * NOTE: We don't need to check the PCIe link state since it's obviously
+ * up while we are able to communicate with the IDT PCIe-switch
+ *
+ * Return: true if link is up, otherwise false
+ */
+static bool idt_ntb_local_link_is_up(struct idt_ntb_dev *ndev)
+{
+ unsigned long irqflags;
+ u32 data;
+
+ /* Read the local Bus Master Enable status */
+ data = idt_nt_read(ndev, IDT_NT_PCICMDSTS);
+ if (!(data & IDT_PCICMDSTS_BME))
+ return false;
+
+ /* Read the local Completion TLPs translation enable status */
+ data = idt_nt_read(ndev, IDT_NT_NTCTL);
+ if (!(data & IDT_NTCTL_CPEN))
+ return false;
+
+ /* Read Mapping table entry corresponding to the local partition */
+ spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+ data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+ spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+ return !!(data & IDT_NTMTBLDATA_VALID);
+}
+
+/*
+ * idt_ntb_peer_link_is_up() - test whether peer NTB link is up
+ * @ndev: IDT NTB hardware driver descriptor
+ * @pidx: Peer port index
+ *
+ * Peer link is up under the following conditions:
+ * - PCIe link is up
+ * - Bus mastering is enabled
+ * - NTCTL has Completion TLPs translation enabled
+ * - Mapping table permits Request TLPs translation
+ *
+ * Return: true if link is up, otherwise false
+ */
+static bool idt_ntb_peer_link_is_up(struct idt_ntb_dev *ndev, int pidx)
+{
+ unsigned long irqflags;
+ unsigned char port;
+ u32 data;
+
+ /* Retrieve the device port number */
+ port = ndev->peers[pidx].port;
+
+ /* Check whether PCIe link is up */
+ data = idt_sw_read(ndev, portdata_tbl[port].sts);
+ if (!(data & IDT_SWPORTxSTS_LINKUP))
+ return false;
+
+ /* Check whether bus mastering is enabled on the peer port */
+ data = idt_sw_read(ndev, portdata_tbl[port].pcicmdsts);
+ if (!(data & IDT_PCICMDSTS_BME))
+ return false;
+
+ /* Check if Completion TLPs translation is enabled on the peer port */
+ data = idt_sw_read(ndev, portdata_tbl[port].ntctl);
+ if (!(data & IDT_NTCTL_CPEN))
+ return false;
+
+ /* Read Mapping table entry corresponding to the peer partition */
+ spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->peers[pidx].part);
+ data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+ spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+ return !!(data & IDT_NTMTBLDATA_VALID);
+}
+
+/*
+ * idt_ntb_link_is_up() - get the current ntb link state (NTB API callback)
+ * @ntb: NTB device context.
+ * @speed: OUT - The link speed expressed as PCIe generation number.
+ * @width: OUT - The link width expressed as the number of PCIe lanes.
+ *
+ * Get the bitfield of NTB link states for all peer ports
+ *
+ * Return: bitfield of indexed ports link state: bit is set/cleared if the
+ * link is up/down respectively.
+ */
+static u64 idt_ntb_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed, enum ntb_width *width)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+ unsigned char pidx;
+ u64 status;
+ u32 data;
+
+ /* Retrieve the local link speed and width */
+ if (speed != NULL || width != NULL) {
+ data = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS);
+ if (speed != NULL)
+ *speed = GET_FIELD(PCIELCTLSTS_CLS, data);
+ if (width != NULL)
+ *width = GET_FIELD(PCIELCTLSTS_NLW, data);
+ }
+
+ /* If local NTB link isn't up then all the links are considered down */
+ if (!idt_ntb_local_link_is_up(ndev))
+ return 0;
+
+ /* Collect all the peer ports link states into the bitfield */
+ status = 0;
+ for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+ if (idt_ntb_peer_link_is_up(ndev, pidx))
+ status |= ((u64)1 << pidx);
+ }
+
+ return status;
+}
+
+/*
+ * idt_ntb_link_enable() - enable local port ntb link (NTB API callback)
+ * @ntb: NTB device context.
+ * @speed: The maximum link speed expressed as a PCIe generation number.
+ * @width: The maximum link width expressed as the number of PCIe lanes.
+ *
+ * Enable just the local NTB link; the PCIe link parameters are ignored.
+ *
+ * Return: always zero.
+ */
+static int idt_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed speed,
+ enum ntb_width width)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ /* Just enable the local NTB link */
+ idt_ntb_local_link_enable(ndev);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link enabled");
+
+ return 0;
+}
+
+/*
+ * idt_ntb_link_disable() - disable local port ntb link (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * Disable just local NTB link.
+ *
+ * Return: always zero.
+ */
+static int idt_ntb_link_disable(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ /* Just disable the local NTB link */
+ idt_ntb_local_link_disable(ndev);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link disabled");
+
+ return 0;
+}
+
+/*=============================================================================
+ * 4. Memory Window operations
+ *
+ * IDT PCIe-switches have two types of memory windows: MWs with direct
+ * address translation and MWs with LUT-based translation. The first type
+ * of MW is a simple mapping of the corresponding BAR address space to a
+ * memory space of a specified target port, so it implements just a
+ * one-to-one mapping. A lookup table in its turn can map one BAR address
+ * space to up to 24 different memory spaces of different ports.
+ * NT-function BARs can be set up to implement either direct or lookup
+ * table based address translations, so:
+ * BAR0 - NT configuration registers space/direct address translation
+ * BAR1 - direct address translation/upper address of BAR0x64
+ * BAR2 - direct address translation/Lookup table with either 12 or 24 entries
+ * BAR3 - direct address translation/upper address of BAR2x64
+ * BAR4 - direct address translation/Lookup table with either 12 or 24 entries
+ * BAR5 - direct address translation/upper address of BAR4x64
+ * Additionally BAR2 and BAR4 can't have a 24-entry LUT enabled at the same
+ * time. Since the BARs setup can be rather complicated this driver
+ * implements a scanning algorithm to have all the possible memory windows
+ * configurations covered.
+ *
+ * NOTE 1 The BAR setup must be done before the Linux kernel enumerates the
+ * NT-function of any port, so that this driver sees a fixed memory windows
+ * configuration. Thus all the initializations must be performed either by
+ * the platform BIOS or using an EEPROM connected to the IDT PCIe-switch
+ * master SMBus.
+ *
+ * NOTE 2 This driver expects BAR0 to map the NT-function configuration
+ * space. A simple calculation gives an upper boundary of 29 possible memory
+ * windows per NT-function if all the BARs are of 32bit type.
+ *=============================================================================
+ */
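+
+/*
+ * A rough sketch of how a client driver might use the memory windows over
+ * the generic NTB API (error handling omitted; pdev stands for the client's
+ * PCI device). One side shares a DMA-able buffer, while its peer programs
+ * the outbound window with the received address:
+ *
+ *	// sharing side: allocate a buffer respecting the MW restrictions
+ *	ntb_mw_get_align(ntb, pidx, widx, &addr_align, &size_align, &size_max);
+ *	buf = dma_alloc_coherent(&pdev->dev, size_max, &dma_addr, GFP_KERNEL);
+ *	// ... send dma_addr to the peer, e.g. over a message register ...
+ *
+ *	// peer side: point the outbound window at the received address
+ *	ntb_peer_mw_set_trans(ntb, pidx, widx, dma_addr, size_max);
+ *	ntb_peer_mw_get_addr(ntb, widx, &base, &size);
+ *	ptr = ioremap_wc(base, size);
+ */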
+
+/*
+ * idt_get_mw_count() - get memory window count
+ * @mw_type: Memory window type
+ *
+ * Return: number of memory windows with respect to the BAR type
+ */
+static inline unsigned char idt_get_mw_count(enum idt_mw_type mw_type)
+{
+ switch (mw_type) {
+ case IDT_MW_DIR:
+ return 1;
+ case IDT_MW_LUT12:
+ return 12;
+ case IDT_MW_LUT24:
+ return 24;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_get_mw_name() - get memory window name
+ * @mw_type: Memory window type
+ *
+ * Return: pointer to a string with name
+ */
+static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
+{
+ switch (mw_type) {
+ case IDT_MW_DIR:
+ return "DIR ";
+ case IDT_MW_LUT12:
+ return "LUT12";
+ case IDT_MW_LUT24:
+ return "LUT24";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+/*
+ * idt_scan_mws() - scan memory windows of the port
+ * @ndev: IDT NTB hardware driver descriptor
+ * @port: Port to get number of memory windows for
+ * @mw_cnt: Out - number of memory windows
+ *
+ * It walks over BAR setup registers of the specified port and determines
+ * the memory windows parameters if any are activated.
+ *
+ * Return: array of memory windows
+ */
+static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
+ unsigned char *mw_cnt)
+{
+ struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
+ const struct idt_ntb_bar *bars;
+ enum idt_mw_type mw_type;
+ unsigned char widx, bidx, en_cnt;
+ bool bar_64bit = false;
+ int aprt_size;
+ u32 data;
+
+ /* Retrieve the array of the BARs registers */
+ bars = portdata_tbl[port].bars;
+
+ /* Scan all the BARs belonging to the port */
+ *mw_cnt = 0;
+ for (bidx = 0; bidx < IDT_BAR_CNT; bidx += 1 + bar_64bit) {
+ /* Read BARSETUP register value */
+ data = idt_sw_read(ndev, bars[bidx].setup);
+
+ /* Skip disabled BARs */
+ if (!(data & IDT_BARSETUP_EN)) {
+ bar_64bit = false;
+ continue;
+ }
+
+ /* Skip next BARSETUP if current one has 64bit addressing */
+ bar_64bit = IS_FLD_SET(BARSETUP_TYPE, data, 64);
+
+ /* Skip configuration space mapping BARs */
+ if (data & IDT_BARSETUP_MODE_CFG)
+ continue;
+
+ /* Retrieve MW type/entries count and aperture size */
+ mw_type = GET_FIELD(BARSETUP_ATRAN, data);
+ en_cnt = idt_get_mw_count(mw_type);
+ aprt_size = (u64)1 << GET_FIELD(BARSETUP_SIZE, data);
+
+ /* Save configurations of all available memory windows */
+ for (widx = 0; widx < en_cnt; widx++, (*mw_cnt)++) {
+ /*
+ * IDT can expose a limited number of MWs, so it's a bug to have
+ * more of them than the driver expects
+ */
+ if (*mw_cnt >= IDT_MAX_NR_MWS)
+ return ERR_PTR(-EINVAL);
+
+ /* Save basic MW info */
+ mws[*mw_cnt].type = mw_type;
+ mws[*mw_cnt].bar = bidx;
+ mws[*mw_cnt].idx = widx;
+ /* It's always DWORD aligned */
+ mws[*mw_cnt].addr_align = IDT_TRANS_ALIGN;
+ /* DIR and LUT approaches configure MWs differently */
+ if (mw_type == IDT_MW_DIR)
+ mws[*mw_cnt].size_max = aprt_size;
+ else if (mw_type == IDT_MW_LUT12)
+ mws[*mw_cnt].size_max = aprt_size / 16;
+ else
+ mws[*mw_cnt].size_max = aprt_size / 32;
+ mws[*mw_cnt].size_align = (mw_type == IDT_MW_DIR) ?
+ IDT_DIR_SIZE_ALIGN : mws[*mw_cnt].size_max;
+ }
+ }
+
+ /* Allocate memory for memory window descriptors */
+ ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt,
+ sizeof(*ret_mws), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(ret_mws))
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy the info of detected memory windows */
+ memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
+
+ return ret_mws;
+}
+
+/*
+ * idt_init_mws() - initialize memory windows subsystem
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Scan BAR setup registers of local and peer ports to determine the
+ * outbound and inbound memory windows parameters
+ *
+ * Return: zero on success, otherwise a negative error number
+ */
+static int idt_init_mws(struct idt_ntb_dev *ndev)
+{
+ struct idt_ntb_peer *peer;
+ unsigned char pidx;
+
+ /* Scan memory windows of the local port */
+ ndev->mws = idt_scan_mws(ndev, ndev->port, &ndev->mw_cnt);
+ if (IS_ERR(ndev->mws)) {
+ dev_err(&ndev->ntb.pdev->dev,
+ "Failed to scan mws of local port %hhu", ndev->port);
+ return PTR_ERR(ndev->mws);
+ }
+
+ /* Scan memory windows of the peer ports */
+ for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+ peer = &ndev->peers[pidx];
+ peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt);
+ if (IS_ERR(peer->mws)) {
+ dev_err(&ndev->ntb.pdev->dev,
+ "Failed to scan mws of port %hhu", peer->port);
+ return PTR_ERR(peer->mws);
+ }
+ }
+
+ /* Initialize spin locker of the LUT registers */
+ spin_lock_init(&ndev->lut_lock);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "Outbound and inbound MWs initialized");
+
+ return 0;
+}
+
+/*
+ * idt_ntb_mw_count() - number of inbound memory windows (NTB API callback)
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ *
+ * The value is returned for the specified peer, so generally speaking it can
+ * be different for different ports depending on the IDT PCIe-switch
+ * initialization.
+ *
+ * Return: the number of memory windows.
+ */
+static int idt_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ if (pidx < 0 || ndev->peer_cnt <= pidx)
+ return -EINVAL;
+
+ return ndev->peers[pidx].mw_cnt;
+}
+
+/*
+ * idt_ntb_mw_get_align() - inbound memory window parameters (NTB API callback)
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ * @addr_align: OUT - the base alignment for translating the memory window
+ * @size_align: OUT - the size alignment for translating the memory window
+ * @size_max: OUT - the maximum size of the memory window
+ *
+ * The peer memory window parameters have already been determined, so just
+ * return the corresponding values, which mustn't change within a session.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static int idt_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+ struct idt_ntb_peer *peer;
+
+ if (pidx < 0 || ndev->peer_cnt <= pidx)
+ return -EINVAL;
+
+ peer = &ndev->peers[pidx];
+
+ if (widx < 0 || peer->mw_cnt <= widx)
+ return -EINVAL;
+
+ if (addr_align != NULL)
+ *addr_align = peer->mws[widx].addr_align;
+
+ if (size_align != NULL)
+ *size_align = peer->mws[widx].size_align;
+
+ if (size_max != NULL)
+ *size_max = peer->mws[widx].size_max;
+
+ return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_count() - number of outbound memory windows
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * The outbound memory windows parameters have been determined based on the
+ * BAR setup registers values, which are mostly constant within one session.
+ *
+ * Return: the number of memory windows.
+ */
+static int idt_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return ndev->mw_cnt;
+}
+
+/*
+ * idt_ntb_peer_mw_get_addr() - get map address of an outbound memory window
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @widx: Memory window index (within ntb_peer_mw_count() return value).
+ * @base: OUT - the base address of mapping region.
+ * @size: OUT - the size of mapping region.
+ *
+ * Just return the parameters of the BAR resource mapping. The size reflects
+ * just the size of the resource.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static int idt_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ if (widx < 0 || ndev->mw_cnt <= widx)
+ return -EINVAL;
+
+ /* Mapping address is just properly shifted BAR resource start */
+ if (base != NULL)
+ *base = pci_resource_start(ntb->pdev, ndev->mws[widx].bar) +
+ ndev->mws[widx].idx * ndev->mws[widx].size_max;
+
+ /* Mapping size has already been calculated at MWs scanning */
+ if (size != NULL)
+ *size = ndev->mws[widx].size_max;
+
+ return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_set_trans() - set a translation address of a memory window
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device the translation address received from.
+ * @widx: Memory window index.
+ * @addr: The dma address of the shared memory to access.
+ * @size: The size of the shared memory to access.
+ *
+ * Direct address translation and LUT-based translation are initialized a
+ * bit differently, although the parameter restrictions are determined by
+ * the same code.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ u64 addr, resource_size_t size)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+ struct idt_mw_cfg *mw_cfg;
+ u32 data = 0, lutoff = 0;
+
+ if (pidx < 0 || ndev->peer_cnt <= pidx)
+ return -EINVAL;
+
+ if (widx < 0 || ndev->mw_cnt <= widx)
+ return -EINVAL;
+
+ /*
+ * Retrieve the memory window config to make sure the passed arguments
+ * fit its restrictions
+ */
+ mw_cfg = &ndev->mws[widx];
+ if (!IS_ALIGNED(addr, mw_cfg->addr_align))
+ return -EINVAL;
+ if (!IS_ALIGNED(size, mw_cfg->size_align) || size > mw_cfg->size_max)
+ return -EINVAL;
+
+ /* DIR and LUT based translations are initialized differently */
+ if (mw_cfg->type == IDT_MW_DIR) {
+ const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
+ u64 limit;
+ /* Set destination partition of translation */
+ data = idt_nt_read(ndev, bar->setup);
+ data = SET_FIELD(BARSETUP_TPART, data, ndev->peers[pidx].part);
+ idt_nt_write(ndev, bar->setup, data);
+ /* Set translation base address */
+ idt_nt_write(ndev, bar->ltbase, (u32)addr);
+ idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32));
+ /* Set the custom BAR aperture limit */
+ limit = pci_resource_start(ntb->pdev, mw_cfg->bar) + size;
+ idt_nt_write(ndev, bar->limit, (u32)limit);
+ if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
+ idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32));
+ } else {
+ unsigned long irqflags;
+ /* Initialize corresponding LUT entry */
+ lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
+ SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
+ data = SET_FIELD(LUTUDATA_PART, 0, ndev->peers[pidx].part) |
+ IDT_LUTUDATA_VALID;
+ spin_lock_irqsave(&ndev->lut_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
+ idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
+ idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
+ idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
+ mmiowb();
+ spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
+ /* Limit address isn't specified since size is fixed for LUT */
+ }
+
+ return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_clear_trans() - clear the outbound MW translation address
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ *
+ * It effectively disables the translation over the specified outbound MW.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
+ int widx)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+ struct idt_mw_cfg *mw_cfg;
+
+ if (pidx < 0 || ndev->peer_cnt <= pidx)
+ return -EINVAL;
+
+ if (widx < 0 || ndev->mw_cnt <= widx)
+ return -EINVAL;
+
+ mw_cfg = &ndev->mws[widx];
+
+ /* DIR and LUT based translations are initialized differently */
+ if (mw_cfg->type == IDT_MW_DIR) {
+ const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
+ u32 data;
+ /* Read BARSETUP to check BAR type */
+ data = idt_nt_read(ndev, bar->setup);
+ /* Disable translation by specifying zero BAR limit */
+ idt_nt_write(ndev, bar->limit, 0);
+ if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
+ idt_nt_write(ndev, (bar + 1)->limit, 0);
+ } else {
+ unsigned long irqflags;
+ u32 lutoff;
+ /* Clear the corresponding LUT entry */
+ lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
+ SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
+ spin_lock_irqsave(&ndev->lut_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
+ idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
+ idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
+ idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
+ mmiowb();
+ spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
+ }
+
+ return 0;
+}
+
+/*=============================================================================
+ * 5. Doorbell operations
+ *
+ * The doorbell functionality of IDT PCIe-switches is pretty unusual. First
+ * of all there is a global doorbell register whose state can be changed by
+ * any NT-function of the IDT device in accordance with the global
+ * permissions. These permission configs are not supported by the NTB API,
+ * so they must be set up by either BIOS or EEPROM settings. In the same way
+ * the state of the global doorbell is reflected in the local inbound
+ * doorbell registers of the NT-functions. It can lead to situations when a
+ * client driver sets some peer doorbell bits and gets them bounced back to
+ * the local inbound doorbell if the permissions are granted.
+ * Secondly there is just one IRQ vector for Doorbell, Message, Temperature
+ * and Switch events, so if a client driver has left any of the Doorbell
+ * bits set and some other event occurs, the driver will be notified of a
+ * Doorbell event again.
+ *=============================================================================
+ */
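+
+/*
+ * A rough client-side usage sketch over the generic NTB API:
+ *
+ *	// sender: ring the peer doorbell
+ *	ntb_peer_db_set(ntb, BIT(0));
+ *
+ *	// receiver, in its db_event callback: read and clear the bits
+ *	db_bits = ntb_db_read(ntb);
+ *	ntb_db_clear(ntb, db_bits);
+ */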
+
+/*
+ * idt_db_isr() - doorbell event ISR
+ * @ndev: IDT NTB hardware driver descriptor
+ * @ntint_sts: NT-function interrupt status
+ *
+ * A doorbell event happens when the DBELL bit of NTINTSTS switches from 0
+ * to 1. That happens only when unmasked doorbell bits are set to ones on a
+ * completely zeroed doorbell register.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_db_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+ /*
+ * The doorbell IRQ status will be cleared only when the client
+ * driver clears all the doorbell bits.
+ */
+ dev_dbg(&ndev->ntb.pdev->dev, "DB IRQ detected %#08x", ntint_sts);
+
+ /* Notify the client driver of possible doorbell state change */
+ ntb_db_event(&ndev->ntb, 0);
+}
+
+/*
+ * idt_ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * IDT PCIe-switches expose just one Doorbell register of DWORD size.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+static u64 idt_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+ return IDT_DBELL_MASK;
+}
+
+/*
+ * idt_ntb_db_read() - read the local doorbell register (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * There is just one inbound doorbell register in each NT-function, so
+ * this method returns its value.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+static u64 idt_ntb_db_read(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return idt_nt_read(ndev, IDT_NT_INDBELLSTS);
+}
+
+/*
+ * idt_ntb_db_clear() - clear bits in the local doorbell register
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits of inbound doorbell register by writing ones to it.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ idt_nt_write(ndev, IDT_NT_INDBELLSTS, (u32)db_bits);
+
+ return 0;
+}
+
+/*
+ * idt_ntb_db_read_mask() - read the local doorbell mask (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * Each inbound doorbell bit can be masked from generating an IRQ by setting
+ * the corresponding bit in the inbound doorbell mask, so this method returns
+ * the value of that register.
+ *
+ * Return: The bits currently set in the local doorbell mask register.
+ */
+static u64 idt_ntb_db_read_mask(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return idt_nt_read(ndev, IDT_NT_INDBELLMSK);
+}
+
+/*
+ * idt_ntb_db_set_mask() - set bits in the local doorbell mask
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell mask bits to set.
+ *
+ * The inbound doorbell mask register value must be read, then OR'ed with
+ * the passed field and only then written back.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return idt_reg_set_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
+ IDT_DBELL_MASK, db_bits);
+}
+
+/*
+ * idt_ntb_db_clear_mask() - clear bits in the local doorbell mask
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * The method just clears the set bits in accordance with the passed
+ * bitfield. The IDT PCIe-switch shall generate an interrupt if there hasn't
+ * been any unmasked bit set before the current unmasking. Otherwise an IRQ
+ * won't be generated since there is only one IRQ vector for all doorbells.
+ *
+ * Return: always zero as success
+ */
+static int idt_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ idt_reg_clear_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
+ db_bits);
+
+ return 0;
+}
+
+/*
+ * idt_ntb_peer_db_set() - set bits in the peer doorbell register
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to set.
+ *
+ * IDT PCIe-switches expose a local outbound doorbell register to change the
+ * peer inbound doorbell register state.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ if (db_bits & ~(u64)IDT_DBELL_MASK)
+ return -EINVAL;
+
+ idt_nt_write(ndev, IDT_NT_OUTDBELLSET, (u32)db_bits);
+ return 0;
+}
+
+/*=============================================================================
+ * 6. Messaging operations
+ *
+ * Each NT-function of the IDT PCIe-switch has four inbound and four
+ * outbound message registers. Each outbound message register can be
+ * connected to one or even more than one peer inbound message register by
+ * setting the global configuration. Since the NTB API permits one-to-one
+ * message register mapping only, the driver acts in accordance with that
+ * restriction.
+ *=============================================================================
+ */
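+
+/*
+ * A rough client-side usage sketch over the generic NTB API (a set outbound
+ * status bit means the peer hasn't freed its inbound register yet, as
+ * described at idt_ntb_msg_read_sts() below):
+ *
+ *	ret = ntb_msg_write(ntb, midx, pidx, data);
+ *	if (ntb_msg_read_sts(ntb) & ntb_msg_outbits(ntb))
+ *		; // send failed - the peer message register was busy
+ */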
+
+/*
+ * idt_init_msg() - initialize messaging interface
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Just initialize the message registers routing table lockers.
+ */
+static void idt_init_msg(struct idt_ntb_dev *ndev)
+{
+ unsigned char midx;
+
+ /* Init the messages routing table lockers */
+ for (midx = 0; midx < IDT_MSG_CNT; midx++)
+ spin_lock_init(&ndev->msg_locks[midx]);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB Messaging initialized");
+}
+
+/*
+ * idt_msg_isr() - message event ISR
+ * @ndev: IDT NTB hardware driver descriptor
+ * @ntint_sts: NT-function interrupt status
+ *
+ * A message event happens when the MSG bit of NTINTSTS switches from 0 to
+ * 1. That happens only when unmasked message status bits are set to ones
+ * on a completely zeroed message status register.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_msg_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+ /*
+ * The message IRQ status will be cleared only when the client
+ * driver clears all the message status bits.
+ */
+ dev_dbg(&ndev->ntb.pdev->dev, "Message IRQ detected %#08x", ntint_sts);
+
+ /* Notify the client driver of possible message status change */
+ ntb_msg_event(&ndev->ntb);
+}
+
+/*
+ * idt_ntb_msg_count() - get the number of message registers (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * IDT PCIe-switches support four message registers.
+ *
+ * Return: the number of message registers.
+ */
+static int idt_ntb_msg_count(struct ntb_dev *ntb)
+{
+ return IDT_MSG_CNT;
+}
+
+/*
+ * idt_ntb_msg_inbits() - get a bitfield of inbound message registers status
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * The NT message status register is shared between the statuses of the
+ * inbound and outbound message registers.
+ *
+ * Return: bitfield of inbound message registers.
+ */
+static u64 idt_ntb_msg_inbits(struct ntb_dev *ntb)
+{
+ return (u64)IDT_INMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_outbits() - get a bitfield of outbound message registers status
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * The NT message status register is shared between the statuses of the
+ * inbound and outbound message registers.
+ *
+ * Return: bitfield of outbound message registers.
+ */
+static u64 idt_ntb_msg_outbits(struct ntb_dev *ntb)
+{
+ return (u64)IDT_OUTMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_read_sts() - read the message registers status (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * IDT PCIe-switches expose message status registers to notify drivers of
+ * incoming data and of failures in case a peer message register isn't freed.
+ *
+ * Return: status bits of message registers
+ */
+static u64 idt_ntb_msg_read_sts(struct ntb_dev *ntb)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return idt_nt_read(ndev, IDT_NT_MSGSTS);
+}
+
+/*
+ * idt_ntb_msg_clear_sts() - clear status bits of message registers
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @sts_bits: Status bits to clear.
+ *
+ * Clear bits in the status register by writing ones.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ idt_nt_write(ndev, IDT_NT_MSGSTS, sts_bits);
+
+ return 0;
+}
+
+/*
+ * idt_ntb_msg_set_mask() - set mask of message register status bits
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits.
+ *
+ * Mask the message status bits from raising an IRQ.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ return idt_reg_set_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+ IDT_MSG_MASK, mask_bits);
+}
+
+/*
+ * idt_ntb_msg_clear_mask() - clear message registers mask
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits.
+ *
+ * Clear the IRQ mask of the message status bits.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+ mask_bits);
+
+ return 0;
+}
+
+/*
+ * idt_ntb_msg_read() - read message register with specified index
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @midx: Message register index
+ * @pidx: OUT - Port index of the peer device the message was retrieved from
+ * @msg: OUT - Data
+ *
+ * Read data from the specified message register and source register.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+ if (midx < 0 || IDT_MSG_CNT <= midx)
+ return -EINVAL;
+
+ /* Retrieve source port index of the message */
+ if (pidx != NULL) {
+ u32 srcpart;
+
+ srcpart = idt_nt_read(ndev, ntdata_tbl.msgs[midx].src);
+ *pidx = ndev->part_idx_map[srcpart];
+
+ /* Sanity check partition index (for initial case) */
+ if (*pidx == -EINVAL)
+ *pidx = 0;
+ }
+
+ /* Retrieve data of the corresponding message register */
+ if (msg != NULL)
+ *msg = idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
+
+ return 0;
+}
+
+/*
+ * idt_ntb_msg_write() - write data to the specified message register
+ * (NTB API callback)
+ * @ntb: NTB device context.
+ * @midx: Message register index
+ * @pidx: Port index of the peer device the message is being sent to
+ * @msg: Data to send
+ *
+ * Just try to send the data to a peer. The message status register should
+ * be checked by the client driver.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, u32 msg)
+{
+ struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+ unsigned long irqflags;
+ u32 swpmsgctl = 0;
+
+ if (midx < 0 || IDT_MSG_CNT <= midx)
+ return -EINVAL;
+
+ if (pidx < 0 || ndev->peer_cnt <= pidx)
+ return -EINVAL;
+
+ /* Collect the routing information */
+ swpmsgctl = SET_FIELD(SWPxMSGCTL_REG, 0, midx) |
+ SET_FIELD(SWPxMSGCTL_PART, 0, ndev->peers[pidx].part);
+
+ /* Lock the messages routing table of the specified register */
+ spin_lock_irqsave(&ndev->msg_locks[midx], irqflags);
+ /* Set the route and send the data */
+ idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
+ idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
+ mmiowb();
+ /* Unlock the messages routing table */
+ spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
+
+ /* Client driver shall check the status register */
+ return 0;
+}
+
+/*=============================================================================
+ * 7. Temperature sensor operations
+ *
+ * The IDT PCIe-switch has an embedded temperature sensor, which can be used
+ * to warn user-space of possible chip overheating. Since the workload
+ * temperature can be different on different platforms, the temperature
+ * thresholds as well as the general sensor settings must be set up in the
+ * framework of the BIOS/EEPROM initializations. That includes the actual
+ * sensor enabling as well.
+ *=============================================================================
+ */
+
+/*
+ * idt_read_temp() - read temperature from chip sensor
+ * @ndev: IDT NTB hardware driver descriptor
+ * @val: OUT - integer part of the temperature
+ * @frac: OUT - fractional part of the temperature (tenths of a degree)
+ */
+static void idt_read_temp(struct idt_ntb_dev *ndev, unsigned char *val,
+ unsigned char *frac)
+{
+ u32 data;
+
+ /* Read the data from TEMP field of the TMPSTS register */
+ data = idt_sw_read(ndev, IDT_SW_TMPSTS);
+ data = GET_FIELD(TMPSTS_TEMP, data);
+ /* TEMP field has one fractional bit and seven integer bits */
+ *val = data >> 1;
+ *frac = ((data & 0x1) ? 5 : 0);
+}
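+
+/*
+ * For example, a raw TEMP field value of 0x65 (0b1100101) decodes to
+ * val = 0x65 >> 1 = 50 and frac = 5, i.e. a temperature of 50.5 degrees.
+ */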
+
+/*
+ * idt_temp_isr() - temperature sensor alarm events ISR
+ * @ndev: IDT NTB hardware driver descriptor
+ * @ntint_sts: NT-function interrupt status
+ *
+ * It handles events of the temperature crossing the alarm thresholds. Since
+ * reading the TMPALARM register clears it, the function doesn't analyze the
+ * read value; instead the current temperature value is just printed to the
+ * log as a warning.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_temp_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+ unsigned char val, frac;
+
+ /* Read the current temperature value */
+ idt_read_temp(ndev, &val, &frac);
+
+ /* Read the temperature alarm to clean the alarm status out */
+ /*(void)idt_sw_read(ndev, IDT_SW_TMPALARM);*/
+
+ /* Clean the corresponding interrupt bit */
+ idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_TMPSENSOR);
+
+ dev_dbg(&ndev->ntb.pdev->dev,
+ "Temp sensor IRQ detected %#08x", ntint_sts);
+
+ /* Print temperature value to log */
+ dev_warn(&ndev->ntb.pdev->dev, "Temperature %hhu.%hhu", val, frac);
+}
+
+/*=============================================================================
+ * 8. ISRs related operations
+ *
+ * The IDT PCIe-switch has a strangely designed IRQ system. There is just one
+ * interrupt vector for the doorbell and message registers, so the hardware
+ * driver can't determine the actual source of an IRQ if, for example, a
+ * message event happened while any of the unmasked doorbells is still set.
+ * A similar situation may occur when switch or temperature sensor events pop
+ * up. The difference is that the SEVENT and TMPSENSOR bits of the NT
+ * interrupt status register can be cleared by the IRQ handler, so the next
+ * interrupt request won't falsely handle the corresponding events.
+ * The hardware driver has only a bottom-half handler of the IRQ, since if
+ * any of the events happens the device won't raise it again before the last
+ * one is handled by clearing the corresponding NTINTSTS bit.
+ *=============================================================================
+ */
+
+static irqreturn_t idt_thread_isr(int irq, void *devid);
+
+/*
+ * idt_init_isr() - initialize PCIe interrupt handler
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_isr(struct idt_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ u32 ntint_mask;
+ int ret;
+
+ /* Allocate just one interrupt vector for the ISR */
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (ret != 1) {
+ dev_err(&pdev->dev, "Failed to allocate IRQ vector");
+ return ret;
+ }
+
+ /* Retrieve the IRQ vector */
+ ret = pci_irq_vector(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ vector");
+ goto err_free_vectors;
+ }
+
+ /* Set the IRQ handler */
+ ret = devm_request_threaded_irq(&pdev->dev, ret, NULL, idt_thread_isr,
+ IRQF_ONESHOT, NTB_IRQNAME, ndev);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to set MSI IRQ handler, %d", ret);
+ goto err_free_vectors;
+ }
+
+ /* Unmask Message/Doorbell/SE/Temperature interrupts */
+ ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
+ idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+ /* From now on the interrupts are enabled */
+ dev_dbg(&pdev->dev, "NTB interrupts initialized");
+
+ return 0;
+
+err_free_vectors:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
+
+/*
+ * idt_deinit_isr() - deinitialize PCIe interrupt handler
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Disable corresponding interrupts and free allocated IRQ vectors.
+ */
+static void idt_deinit_isr(struct idt_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ u32 ntint_mask;
+
+ /* Mask interrupts back */
+ ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
+ idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+ /* Manually free the IRQ, otherwise pci_free_irq_vectors() will fail */
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 0), ndev);
+
+ /* Free allocated IRQ vectors */
+ pci_free_irq_vectors(pdev);
+
+ dev_dbg(&pdev->dev, "NTB interrupts deinitialized");
+}
+
+/*
+ * idt_thread_isr() - NT function interrupts handler
+ * @irq: IRQ number
+ * @devid: Custom buffer
+ *
+ * It reads the current NT interrupt status register and handles all the
+ * events it declares.
+ * The method is the bottom-half routine of the default PCIe IRQ handler.
+ */
+static irqreturn_t idt_thread_isr(int irq, void *devid)
+{
+ struct idt_ntb_dev *ndev = devid;
+ bool handled = false;
+ u32 ntint_sts;
+
+ /* Read the NT interrupts status register */
+ ntint_sts = idt_nt_read(ndev, IDT_NT_NTINTSTS);
+
+ /* Handle messaging interrupts */
+ if (ntint_sts & IDT_NTINTSTS_MSG) {
+ idt_msg_isr(ndev, ntint_sts);
+ handled = true;
+ }
+
+ /* Handle doorbell interrupts */
+ if (ntint_sts & IDT_NTINTSTS_DBELL) {
+ idt_db_isr(ndev, ntint_sts);
+ handled = true;
+ }
+
+ /* Handle switch event interrupts */
+ if (ntint_sts & IDT_NTINTSTS_SEVENT) {
+ idt_se_isr(ndev, ntint_sts);
+ handled = true;
+ }
+
+ /* Handle temperature sensor interrupt */
+ if (ntint_sts & IDT_NTINTSTS_TMPSENSOR) {
+ idt_temp_isr(ndev, ntint_sts);
+ handled = true;
+ }
+
+ dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*===========================================================================
+ * 9. NTB hardware driver initialization
+ *===========================================================================
+ */
+
+/*
+ * NTB API operations
+ */
+static const struct ntb_dev_ops idt_ntb_ops = {
+ .port_number = idt_ntb_port_number,
+ .peer_port_count = idt_ntb_peer_port_count,
+ .peer_port_number = idt_ntb_peer_port_number,
+ .peer_port_idx = idt_ntb_peer_port_idx,
+ .link_is_up = idt_ntb_link_is_up,
+ .link_enable = idt_ntb_link_enable,
+ .link_disable = idt_ntb_link_disable,
+ .mw_count = idt_ntb_mw_count,
+ .mw_get_align = idt_ntb_mw_get_align,
+ .peer_mw_count = idt_ntb_peer_mw_count,
+ .peer_mw_get_addr = idt_ntb_peer_mw_get_addr,
+ .peer_mw_set_trans = idt_ntb_peer_mw_set_trans,
+ .peer_mw_clear_trans = idt_ntb_peer_mw_clear_trans,
+ .db_valid_mask = idt_ntb_db_valid_mask,
+ .db_read = idt_ntb_db_read,
+ .db_clear = idt_ntb_db_clear,
+ .db_read_mask = idt_ntb_db_read_mask,
+ .db_set_mask = idt_ntb_db_set_mask,
+ .db_clear_mask = idt_ntb_db_clear_mask,
+ .peer_db_set = idt_ntb_peer_db_set,
+ .msg_count = idt_ntb_msg_count,
+ .msg_inbits = idt_ntb_msg_inbits,
+ .msg_outbits = idt_ntb_msg_outbits,
+ .msg_read_sts = idt_ntb_msg_read_sts,
+ .msg_clear_sts = idt_ntb_msg_clear_sts,
+ .msg_set_mask = idt_ntb_msg_set_mask,
+ .msg_clear_mask = idt_ntb_msg_clear_mask,
+ .msg_read = idt_ntb_msg_read,
+ .msg_write = idt_ntb_msg_write
+};
+
+/*
+ * idt_register_device() - register IDT NTB device
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_register_device(struct idt_ntb_dev *ndev)
+{
+ int ret;
+
+ /* Initialize the rest of NTB device structure and register it */
+ ndev->ntb.ops = &idt_ntb_ops;
+ ndev->ntb.topo = NTB_TOPO_PRI;
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret != 0) {
+ dev_err(&ndev->ntb.pdev->dev, "Failed to register NTB device");
+ return ret;
+ }
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB device successfully registered");
+
+ return 0;
+}
+
+/*
+ * idt_unregister_device() - unregister IDT NTB device
+ * @ndev: IDT NTB hardware driver descriptor
+ */
+static void idt_unregister_device(struct idt_ntb_dev *ndev)
+{
+ /* Just unregister the NTB device */
+ ntb_unregister_device(&ndev->ntb);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB device unregistered");
+}
+
+/*=============================================================================
+ * 10. DebugFS node initialization
+ *=============================================================================
+ */
+
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp);
+
+/*
+ * Driver DebugFS info file operations
+ */
+static const struct file_operations idt_dbgfs_info_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = idt_dbgfs_info_read
+};
+
+/*
+ * idt_dbgfs_info_read() - DebugFS read info node callback
+ * @filp: File node descriptor
+ * @ubuf: User-space buffer to put data to
+ * @count: Size of the buffer
+ * @offp: Offset within the buffer
+ */
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct idt_ntb_dev *ndev = filp->private_data;
+ unsigned char temp, frac, idx, pidx, cnt;
+ ssize_t ret = 0, off = 0;
+ unsigned long irqflags;
+ enum ntb_speed speed;
+ enum ntb_width width;
+ char *strbuf;
+ size_t size;
+ u32 data;
+
+ /* Let's limit the buffer size the same way the Intel/AMD drivers do */
+ size = min_t(size_t, count, 0x1000U);
+
+ /* Allocate the memory for the buffer */
+ strbuf = kmalloc(size, GFP_KERNEL);
+ if (strbuf == NULL)
+ return -ENOMEM;
+
+ /* Put the data into the string buffer */
+ off += scnprintf(strbuf + off, size - off,
+ "\n\t\tIDT NTB device Information:\n\n");
+
+ /* General local device configurations */
+ off += scnprintf(strbuf + off, size - off,
+ "Local Port %hhu, Partition %hhu\n", ndev->port, ndev->part);
+
+ /* Peer ports information */
+ off += scnprintf(strbuf + off, size - off, "Peers:\n");
+ for (idx = 0; idx < ndev->peer_cnt; idx++) {
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu. Port %hhu, Partition %hhu\n",
+ idx, ndev->peers[idx].port, ndev->peers[idx].part);
+ }
+
+ /* Links status */
+ data = idt_ntb_link_is_up(&ndev->ntb, &speed, &width);
+ off += scnprintf(strbuf + off, size - off,
+ "NTB link status\t- 0x%08x, ", data);
+ off += scnprintf(strbuf + off, size - off, "PCIe Gen %d x%d lanes\n",
+ speed, width);
+
+ /* Mapping table entries */
+ off += scnprintf(strbuf + off, size - off, "NTB Mapping Table:\n");
+ for (idx = 0; idx < IDT_MTBL_ENTRY_CNT; idx++) {
+ spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+ idt_nt_write(ndev, IDT_NT_NTMTBLADDR, idx);
+ data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+ spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+ /* Print valid entries only */
+ if (data & IDT_NTMTBLDATA_VALID) {
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu. Partition %d, Requester ID 0x%04x\n",
+ idx, GET_FIELD(NTMTBLDATA_PART, data),
+ GET_FIELD(NTMTBLDATA_REQID, data));
+ }
+ }
+ off += scnprintf(strbuf + off, size - off, "\n");
+
+ /* Outbound memory windows information */
+ off += scnprintf(strbuf + off, size - off,
+ "Outbound Memory Windows:\n");
+ for (idx = 0; idx < ndev->mw_cnt; idx += cnt) {
+ data = ndev->mws[idx].type;
+ cnt = idt_get_mw_count(data);
+
+ /* Print Memory Window information */
+ if (data == IDT_MW_DIR)
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu.\t", idx);
+ else
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+
+ off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
+ idt_get_mw_name(data), ndev->mws[idx].bar);
+
+ off += scnprintf(strbuf + off, size - off,
+ "Address align 0x%08llx, ", ndev->mws[idx].addr_align);
+
+ off += scnprintf(strbuf + off, size - off,
+ "Size align 0x%08llx, Size max %llu\n",
+ ndev->mws[idx].size_align, ndev->mws[idx].size_max);
+ }
+
+ /* Inbound memory windows information */
+ for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+ off += scnprintf(strbuf + off, size - off,
+ "Inbound Memory Windows for peer %hhu (Port %hhu):\n",
+ pidx, ndev->peers[pidx].port);
+
+ /* Print Memory Windows information */
+ for (idx = 0; idx < ndev->peers[pidx].mw_cnt; idx += cnt) {
+ data = ndev->peers[pidx].mws[idx].type;
+ cnt = idt_get_mw_count(data);
+
+ if (data == IDT_MW_DIR)
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu.\t", idx);
+ else
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+
+ off += scnprintf(strbuf + off, size - off,
+ "%s BAR%hhu, ", idt_get_mw_name(data),
+ ndev->peers[pidx].mws[idx].bar);
+
+ off += scnprintf(strbuf + off, size - off,
+ "Address align 0x%08llx, ",
+ ndev->peers[pidx].mws[idx].addr_align);
+
+ off += scnprintf(strbuf + off, size - off,
+ "Size align 0x%08llx, Size max %llu\n",
+ ndev->peers[pidx].mws[idx].size_align,
+ ndev->peers[pidx].mws[idx].size_max);
+ }
+ }
+ off += scnprintf(strbuf + off, size - off, "\n");
+
+ /* Doorbell information */
+ data = idt_sw_read(ndev, IDT_SW_GDBELLSTS);
+ off += scnprintf(strbuf + off, size - off,
+ "Global Doorbell state\t- 0x%08x\n", data);
+ data = idt_ntb_db_read(&ndev->ntb);
+ off += scnprintf(strbuf + off, size - off,
+ "Local Doorbell state\t- 0x%08x\n", data);
+ data = idt_nt_read(ndev, IDT_NT_INDBELLMSK);
+ off += scnprintf(strbuf + off, size - off,
+ "Local Doorbell mask\t- 0x%08x\n", data);
+ off += scnprintf(strbuf + off, size - off, "\n");
+
+ /* Messaging information */
+ off += scnprintf(strbuf + off, size - off,
+ "Message event valid\t- 0x%08x\n", IDT_MSG_MASK);
+ data = idt_ntb_msg_read_sts(&ndev->ntb);
+ off += scnprintf(strbuf + off, size - off,
+ "Message event status\t- 0x%08x\n", data);
+ data = idt_nt_read(ndev, IDT_NT_MSGSTSMSK);
+ off += scnprintf(strbuf + off, size - off,
+ "Message event mask\t- 0x%08x\n", data);
+ off += scnprintf(strbuf + off, size - off,
+ "Message data:\n");
+ for (idx = 0; idx < IDT_MSG_CNT; idx++) {
+ int src;
+ (void)idt_ntb_msg_read(&ndev->ntb, idx, &src, &data);
+ off += scnprintf(strbuf + off, size - off,
+ "\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
+ idx, data, src, ndev->peers[src].port);
+ }
+ off += scnprintf(strbuf + off, size - off, "\n");
+
+ /* Current temperature */
+ idt_read_temp(ndev, &temp, &frac);
+ off += scnprintf(strbuf + off, size - off,
+ "Switch temperature\t\t- %hhu.%hhuC\n", temp, frac);
+
+ /* Copy the buffer to the User Space */
+ ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
+ kfree(strbuf);
+
+ return ret;
+}
+
+/*
+ * idt_init_dbgfs() - initialize DebugFS node
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
+{
+ char devname[64];
+
+ /* If the top directory has not been created, do nothing */
+ if (IS_ERR_OR_NULL(dbgfs_topdir)) {
+ dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
+ return PTR_ERR(dbgfs_topdir);
+ }
+
+ /* Create the info file node */
+ snprintf(devname, sizeof(devname), "info:%s", pci_name(ndev->ntb.pdev));
+ ndev->dbgfs_info = debugfs_create_file(devname, 0400, dbgfs_topdir,
+ ndev, &idt_dbgfs_info_ops);
+ if (IS_ERR(ndev->dbgfs_info)) {
+ dev_dbg(&ndev->ntb.pdev->dev, "Failed to create DebugFS node");
+ return PTR_ERR(ndev->dbgfs_info);
+ }
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node created");
+
+ return 0;
+}
+
+/*
+ * idt_deinit_dbgfs() - deinitialize DebugFS node
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Just discard the info node from DebugFS
+ */
+static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
+{
+ debugfs_remove(ndev->dbgfs_info);
+
+ dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node discarded");
+}
+
+/*=============================================================================
+ * 11. Basic PCIe device initialization
+ *=============================================================================
+ */
+
+/*
+ * idt_check_setup() - Check whether the IDT PCIe-switch is properly
+ * pre-initialized
+ * @pdev: Pointer to the PCI device descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_check_setup(struct pci_dev *pdev)
+{
+ u32 data;
+ int ret;
+
+ /* Read the BARSETUP0 */
+ ret = pci_read_config_dword(pdev, IDT_NT_BARSETUP0, &data);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to read BARSETUP0 config register");
+ return ret;
+ }
+
+ /* Check whether BAR0 is enabled and mapped to the config space */
+ if (!(data & IDT_BARSETUP_EN) || !(data & IDT_BARSETUP_MODE_CFG)) {
+ dev_err(&pdev->dev, "BAR0 doesn't map config space");
+ return -EINVAL;
+ }
+
+ /* Configuration space BAR0 must have a certain size */
+ if ((data & IDT_BARSETUP_SIZE_MASK) != IDT_BARSETUP_SIZE_CFG) {
+ dev_err(&pdev->dev, "Invalid size of config space");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "NTB device pre-initialized correctly");
+
+ return 0;
+}
+
+/*
+ * Create the IDT PCIe-switch driver descriptor
+ * @pdev: Pointer to the PCI device descriptor
+ * @id: IDT PCIe-device configuration
+ *
+ * It just allocates memory for the IDT PCIe-switch device structure and
+ * initializes some commonly used fields.
+ *
+ * No release method is needed, since a managed device resource is used
+ * for the memory allocation.
+ *
+ * Return: pointer to the descriptor, otherwise an error pointer.
+ */
+static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct idt_ntb_dev *ndev;
+
+ /* Allocate memory for the IDT PCIe-device descriptor */
+ ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev) {
+ dev_err(&pdev->dev, "Memory allocation failed for descriptor");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Save the IDT PCIe-switch ports configuration */
+ ndev->swcfg = (struct idt_89hpes_cfg *)id->driver_data;
+ /* Save the PCI-device pointer inside the NTB device structure */
+ ndev->ntb.pdev = pdev;
+
+ /* Initialize the spin locks of the Doorbell, Message and GASA registers */
+ spin_lock_init(&ndev->db_mask_lock);
+ spin_lock_init(&ndev->msg_mask_lock);
+ spin_lock_init(&ndev->gasa_lock);
+
+ dev_info(&pdev->dev, "IDT %s discovered", ndev->swcfg->name);
+
+ dev_dbg(&pdev->dev, "NTB device descriptor created");
+
+ return ndev;
+}
+
+/*
+ * idt_init_pci() - initialize the basic PCI-related subsystem
+ * @ndev: Pointer to the IDT PCIe-switch driver descriptor
+ *
+ * Managed device resources will be freed automatically in case of failure or
+ * driver detachment.
+ *
+ * Return: zero on success, otherwise negative error number.
+ */
+static int idt_init_pci(struct idt_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ int ret;
+
+ /* Initialize the DMA bit mask */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret != 0) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to set DMA bit mask\n");
+ return ret;
+ }
+ dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n");
+ }
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret != 0) {
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to set consistent DMA bit mask\n");
+ return ret;
+ }
+ dev_warn(&pdev->dev,
+ "Cannot set consistent DMA highmem bit mask\n");
+ }
+
+ /*
+ * Enable the device advanced error reporting. It's not critical if
+ * AER is disabled in the kernel.
+ */
+ ret = pci_enable_pcie_error_reporting(pdev);
+ if (ret != 0)
+ dev_warn(&pdev->dev, "PCIe AER capability disabled\n");
+ else /* Cleanup uncorrectable error status before getting to init */
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ /* First enable the PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to enable PCIe device\n");
+ goto err_disable_aer;
+ }
+
+ /*
+ * Enable bus mastering, which effectively enables the MSI IRQs and
+ * the Request TLPs translation
+ */
+ pci_set_master(pdev);
+
+ /* Request all the BAR resources, but map BAR0 only */
+ ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request resources\n");
+ goto err_clear_master;
+ }
+
+ /* Retrieve virtual address of BAR0 - PCI configuration space */
+ ndev->cfgspc = pcim_iomap_table(pdev)[0];
+
+ /* Save the IDT driver data pointer in the PCI-device private pointer */
+ pci_set_drvdata(pdev, ndev);
+
+ dev_dbg(&pdev->dev, "NT-function PCIe interface initialized");
+
+ return 0;
+
+err_clear_master:
+ pci_clear_master(pdev);
+err_disable_aer:
+ (void)pci_disable_pcie_error_reporting(pdev);
+
+ return ret;
+}
+
+/*
+ * idt_deinit_pci() - deinitialize the basic PCI-related subsystem
+ * @ndev: Pointer to the IDT PCIe-switch driver descriptor
+ *
+ * Managed resources will be freed on the driver detachment
+ */
+static void idt_deinit_pci(struct idt_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+
+ /* Clean up the PCI-device private data pointer */
+ pci_set_drvdata(pdev, NULL);
+
+ /* Clear the bus master bit, disabling the Request TLPs translation */
+ pci_clear_master(pdev);
+
+ /* Disable the AER capability */
+ (void)pci_disable_pcie_error_reporting(pdev);
+
+ dev_dbg(&pdev->dev, "NT-function PCIe interface cleared");
+}
+
+/*===========================================================================
+ * 12. PCI bus callback functions
+ *===========================================================================
+ */
+
+/*
+ * idt_pci_probe() - PCI device probe callback
+ * @pdev: Pointer to PCI device structure
+ * @id: PCIe device custom descriptor
+ *
+ * Return: zero on success, otherwise negative error number
+ */
+static int idt_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct idt_ntb_dev *ndev;
+ int ret;
+
+ /* Check whether IDT PCIe-switch is properly pre-initialized */
+ ret = idt_check_setup(pdev);
+ if (ret != 0)
+ return ret;
+
+ /* Allocate the memory for IDT NTB device data */
+ ndev = idt_create_dev(pdev, id);
+ if (IS_ERR_OR_NULL(ndev))
+ return PTR_ERR(ndev);
+
+ /* Initialize the basic PCI subsystem of the device */
+ ret = idt_init_pci(ndev);
+ if (ret != 0)
+ return ret;
+
+ /* Scan ports of the IDT PCIe-switch */
+ (void)idt_scan_ports(ndev);
+
+ /* Initialize NTB link events subsystem */
+ idt_init_link(ndev);
+
+ /* Initialize MWs subsystem */
+ ret = idt_init_mws(ndev);
+ if (ret != 0)
+ goto err_deinit_link;
+
+ /* Initialize Messaging subsystem */
+ idt_init_msg(ndev);
+
+ /* Initialize IDT interrupts handler */
+ ret = idt_init_isr(ndev);
+ if (ret != 0)
+ goto err_deinit_link;
+
+ /* Register IDT NTB devices on the NTB bus */
+ ret = idt_register_device(ndev);
+ if (ret != 0)
+ goto err_deinit_isr;
+
+ /* Initialize DebugFS info node */
+ (void)idt_init_dbgfs(ndev);
+
+ /* IDT PCIe-switch NTB driver is finally initialized */
+ dev_info(&pdev->dev, "IDT NTB device is ready");
+
+ /* May the force be with us... */
+ return 0;
+
+err_deinit_isr:
+ idt_deinit_isr(ndev);
+err_deinit_link:
+ idt_deinit_link(ndev);
+ idt_deinit_pci(ndev);
+
+ return ret;
+}
+
+/*
+ * idt_pci_remove() - PCI device remove callback
+ * @pdev: Pointer to PCI device structure
+ */
+static void idt_pci_remove(struct pci_dev *pdev)
+{
+ struct idt_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+ /* Deinit the DebugFS node */
+ idt_deinit_dbgfs(ndev);
+
+ /* Unregister NTB device */
+ idt_unregister_device(ndev);
+
+ /* Stop the interrupts handling */
+ idt_deinit_isr(ndev);
+
+ /* Deinitialize link event subsystem */
+ idt_deinit_link(ndev);
+
+ /* Deinit basic PCI subsystem */
+ idt_deinit_pci(ndev);
+
+ /* IDT PCIe-switch NTB driver is finally deinitialized */
+ dev_info(&pdev->dev, "IDT NTB device is removed");
+
+ /* Sayonara... */
+}
+
+/*
+ * IDT PCIe-switch models ports configuration structures
+ */
+static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+ .name = "89HPES24NT6AG2",
+ .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+ .name = "89HPES32NT8AG2",
+ .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+ .name = "89HPES32NT8BG2",
+ .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+ .name = "89HPES12NT12G2",
+ .port_cnt = 3, .ports = {0, 8, 16}
+};
+static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+ .name = "89HPES16NT16G2",
+ .port_cnt = 4, .ports = {0, 8, 12, 16}
+};
+static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+ .name = "89HPES24NT24G2",
+ .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+ .name = "89HPES32NT24AG2",
+ .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+ .name = "89HPES32NT24BG2",
+ .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+
+/*
+ * PCI-ids table of the supported IDT PCIe-switch devices
+ */
+static const struct pci_device_id idt_pci_tbl[] = {
+ {IDT_PCI_DEVICE_IDS(89HPES24NT6AG2, idt_89hpes24nt6ag2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES32NT8AG2, idt_89hpes32nt8ag2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES32NT8BG2, idt_89hpes32nt8bg2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES12NT12G2, idt_89hpes12nt12g2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES16NT16G2, idt_89hpes16nt16g2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES24NT24G2, idt_89hpes24nt24g2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES32NT24AG2, idt_89hpes32nt24ag2_config)},
+ {IDT_PCI_DEVICE_IDS(89HPES32NT24BG2, idt_89hpes32nt24bg2_config)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, idt_pci_tbl);
+
+/*
+ * IDT PCIe-switch NT-function device driver structure definition
+ */
+static struct pci_driver idt_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = idt_pci_probe,
+ .remove = idt_pci_remove,
+ .id_table = idt_pci_tbl,
+};
+
+static int __init idt_pci_driver_init(void)
+{
+ pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+ /* Create the top DebugFS directory if the FS is initialized */
+ if (debugfs_initialized())
+ dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ /* Register the NTB hardware driver to handle the PCI device */
+ return pci_register_driver(&idt_pci_driver);
+}
+module_init(idt_pci_driver_init);
+
+static void __exit idt_pci_driver_exit(void)
+{
+ /* Unregister the NTB hardware driver */
+ pci_unregister_driver(&idt_pci_driver);
+
+ /* Discard the top DebugFS directory */
+ debugfs_remove_recursive(dbgfs_topdir);
+}
+module_exit(idt_pci_driver_exit);
+
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.h b/drivers/ntb/hw/idt/ntb_hw_idt.h
new file mode 100644
index 000000000000..856fd182f6f4
--- /dev/null
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.h
@@ -0,0 +1,1149 @@
+/*
+ * This file is provided under a GPLv2 license. When using or
+ * redistributing this file, you may do so under that license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2016 T-Platforms All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, one can be found http://www.gnu.org/licenses/.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
+ */
+
+#ifndef NTB_HW_IDT_H
+#define NTB_HW_IDT_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/ntb.h>
+
+
+/*
+ * Macro is used to create the struct pci_device_id that matches
+ * the supported IDT PCIe-switches
+ * @devname: Capitalized name of the particular device
+ * @data: Variable passed to the driver of the particular device
+ */
+#define IDT_PCI_DEVICE_IDS(devname, data) \
+ .vendor = PCI_VENDOR_ID_IDT, .device = PCI_DEVICE_ID_IDT_##devname, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), .class_mask = (0xFFFF00), \
+ .driver_data = (kernel_ulong_t)&data
+
+/*
+ * IDT PCIe-switches device IDs
+ */
+#define PCI_DEVICE_ID_IDT_89HPES24NT6AG2 0x8091
+#define PCI_DEVICE_ID_IDT_89HPES32NT8AG2 0x808F
+#define PCI_DEVICE_ID_IDT_89HPES32NT8BG2 0x8088
+#define PCI_DEVICE_ID_IDT_89HPES12NT12G2 0x8092
+#define PCI_DEVICE_ID_IDT_89HPES16NT16G2 0x8090
+#define PCI_DEVICE_ID_IDT_89HPES24NT24G2 0x808E
+#define PCI_DEVICE_ID_IDT_89HPES32NT24AG2 0x808C
+#define PCI_DEVICE_ID_IDT_89HPES32NT24BG2 0x808A
+
+/*
+ * NT-function Configuration Space registers
+ * NOTE 1) The IDT PCIe-switch internal data is little-endian
+ * so it must be taken into account in the driver
+ * internals.
+ * 2) Additionally the registers should be accessed either
+ * with byte-enables corresponding to their native size or
+ * with the size of one DWORD.
+ *
+ * So to simplify the driver code, only DWORD-sized read/write
+ * operations are utilized.
+ */
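+/*
+ * For example (illustrative only, reusing the driver's own accessor defined
+ * in ntb_hw_idt.c): even a byte-wide field is fetched by reading a whole
+ * little-endian DWORD:
+ *
+ *	u32 sts = idt_nt_read(ndev, IDT_NT_NTINTSTS);
+ */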
+/* PCI Express Configuration Space */
+/* PCI Express command/status register (DWORD) */
+#define IDT_NT_PCICMDSTS 0x00004U
+/* PCI Express Device Capabilities (DWORD) */
+#define IDT_NT_PCIEDCAP 0x00044U
+/* PCI Express Device Control/Status (WORD+WORD) */
+#define IDT_NT_PCIEDCTLSTS 0x00048U
+/* PCI Express Link Capabilities (DWORD) */
+#define IDT_NT_PCIELCAP 0x0004CU
+/* PCI Express Link Control/Status (WORD+WORD) */
+#define IDT_NT_PCIELCTLSTS 0x00050U
+/* PCI Express Device Capabilities 2 (DWORD) */
+#define IDT_NT_PCIEDCAP2 0x00064U
+/* PCI Express Device Control 2 (WORD+WORD) */
+#define IDT_NT_PCIEDCTL2 0x00068U
+/* PCI Power Management Control and Status (DWORD) */
+#define IDT_NT_PMCSR 0x000C4U
+/*==========================================*/
+/* IDT Proprietary NT-port-specific registers */
+/* NT-function main control registers */
+/* NT Endpoint Control (DWORD) */
+#define IDT_NT_NTCTL 0x00400U
+/* NT Endpoint Interrupt Status/Mask (DWORD) */
+#define IDT_NT_NTINTSTS 0x00404U
+#define IDT_NT_NTINTMSK 0x00408U
+/* NT Endpoint Signal Data (DWORD) */
+#define IDT_NT_NTSDATA 0x0040CU
+/* NT Endpoint Global Signal (DWORD) */
+#define IDT_NT_NTGSIGNAL 0x00410U
+/* Internal Error Reporting Mask 0/1 (DWORD) */
+#define IDT_NT_NTIERRORMSK0 0x00414U
+#define IDT_NT_NTIERRORMSK1 0x00418U
+/* Doorbell registers */
+/* NT Outbound Doorbell Set (DWORD) */
+#define IDT_NT_OUTDBELLSET 0x00420U
+/* NT Inbound Doorbell Status/Mask (DWORD) */
+#define IDT_NT_INDBELLSTS 0x00428U
+#define IDT_NT_INDBELLMSK 0x0042CU
+/* Message registers */
+/* Outbound Message N (DWORD) */
+#define IDT_NT_OUTMSG0 0x00430U
+#define IDT_NT_OUTMSG1 0x00434U
+#define IDT_NT_OUTMSG2 0x00438U
+#define IDT_NT_OUTMSG3 0x0043CU
+/* Inbound Message N (DWORD) */
+#define IDT_NT_INMSG0 0x00440U
+#define IDT_NT_INMSG1 0x00444U
+#define IDT_NT_INMSG2 0x00448U
+#define IDT_NT_INMSG3 0x0044CU
+/* Inbound Message Source N (DWORD) */
+#define IDT_NT_INMSGSRC0 0x00450U
+#define IDT_NT_INMSGSRC1 0x00454U
+#define IDT_NT_INMSGSRC2 0x00458U
+#define IDT_NT_INMSGSRC3 0x0045CU
+/* Message Status (DWORD) */
+#define IDT_NT_MSGSTS 0x00460U
+/* Message Status Mask (DWORD) */
+#define IDT_NT_MSGSTSMSK 0x00464U
+/* BAR-setup registers */
+/* BAR N Setup/Limit Address/Lower and Upper Translated Base Address (DWORD) */
+#define IDT_NT_BARSETUP0 0x00470U
+#define IDT_NT_BARLIMIT0 0x00474U
+#define IDT_NT_BARLTBASE0 0x00478U
+#define IDT_NT_BARUTBASE0 0x0047CU
+#define IDT_NT_BARSETUP1 0x00480U
+#define IDT_NT_BARLIMIT1 0x00484U
+#define IDT_NT_BARLTBASE1 0x00488U
+#define IDT_NT_BARUTBASE1 0x0048CU
+#define IDT_NT_BARSETUP2 0x00490U
+#define IDT_NT_BARLIMIT2 0x00494U
+#define IDT_NT_BARLTBASE2 0x00498U
+#define IDT_NT_BARUTBASE2 0x0049CU
+#define IDT_NT_BARSETUP3 0x004A0U
+#define IDT_NT_BARLIMIT3 0x004A4U
+#define IDT_NT_BARLTBASE3 0x004A8U
+#define IDT_NT_BARUTBASE3 0x004ACU
+#define IDT_NT_BARSETUP4 0x004B0U
+#define IDT_NT_BARLIMIT4 0x004B4U
+#define IDT_NT_BARLTBASE4 0x004B8U
+#define IDT_NT_BARUTBASE4 0x004BCU
+#define IDT_NT_BARSETUP5 0x004C0U
+#define IDT_NT_BARLIMIT5 0x004C4U
+#define IDT_NT_BARLTBASE5 0x004C8U
+#define IDT_NT_BARUTBASE5 0x004CCU
+/* NT mapping table registers */
+/* NT Mapping Table Address/Status/Data (DWORD) */
+#define IDT_NT_NTMTBLADDR 0x004D0U
+#define IDT_NT_NTMTBLSTS 0x004D4U
+#define IDT_NT_NTMTBLDATA 0x004D8U
+/* Requester ID (Bus:Device:Function) Capture (DWORD) */
+#define IDT_NT_REQIDCAP 0x004DCU
+/* Memory Windows Lookup table registers */
+/* Lookup Table Offset/Lower, Middle and Upper data (DWORD) */
+#define IDT_NT_LUTOFFSET 0x004E0U
+#define IDT_NT_LUTLDATA 0x004E4U
+#define IDT_NT_LUTMDATA 0x004E8U
+#define IDT_NT_LUTUDATA 0x004ECU
+/* NT Endpoint Uncorrectable/Correctable Errors Emulation registers (DWORD) */
+#define IDT_NT_NTUEEM 0x004F0U
+#define IDT_NT_NTCEEM 0x004F4U
+/* Global Address Space Access/Data registers (DWORD) */
+#define IDT_NT_GASAADDR 0x00FF8U
+#define IDT_NT_GASADATA 0x00FFCU
+
+/*
+ * IDT PCIe-switch Global Configuration and Status registers
+ */
+/* Port N Configuration register in global space */
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP0_PCIECMDSTS 0x01004U
+#define IDT_SW_NTP0_PCIELCTLSTS 0x01050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP0_NTCTL 0x01400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP0_BARSETUP0 0x01470U
+#define IDT_SW_NTP0_BARLIMIT0 0x01474U
+#define IDT_SW_NTP0_BARLTBASE0 0x01478U
+#define IDT_SW_NTP0_BARUTBASE0 0x0147CU
+#define IDT_SW_NTP0_BARSETUP1 0x01480U
+#define IDT_SW_NTP0_BARLIMIT1 0x01484U
+#define IDT_SW_NTP0_BARLTBASE1 0x01488U
+#define IDT_SW_NTP0_BARUTBASE1 0x0148CU
+#define IDT_SW_NTP0_BARSETUP2 0x01490U
+#define IDT_SW_NTP0_BARLIMIT2 0x01494U
+#define IDT_SW_NTP0_BARLTBASE2 0x01498U
+#define IDT_SW_NTP0_BARUTBASE2 0x0149CU
+#define IDT_SW_NTP0_BARSETUP3 0x014A0U
+#define IDT_SW_NTP0_BARLIMIT3 0x014A4U
+#define IDT_SW_NTP0_BARLTBASE3 0x014A8U
+#define IDT_SW_NTP0_BARUTBASE3 0x014ACU
+#define IDT_SW_NTP0_BARSETUP4 0x014B0U
+#define IDT_SW_NTP0_BARLIMIT4 0x014B4U
+#define IDT_SW_NTP0_BARLTBASE4 0x014B8U
+#define IDT_SW_NTP0_BARUTBASE4 0x014BCU
+#define IDT_SW_NTP0_BARSETUP5 0x014C0U
+#define IDT_SW_NTP0_BARLIMIT5 0x014C4U
+#define IDT_SW_NTP0_BARLTBASE5 0x014C8U
+#define IDT_SW_NTP0_BARUTBASE5 0x014CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP2_PCIECMDSTS 0x05004U
+#define IDT_SW_NTP2_PCIELCTLSTS 0x05050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP2_NTCTL 0x05400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP2_BARSETUP0 0x05470U
+#define IDT_SW_NTP2_BARLIMIT0 0x05474U
+#define IDT_SW_NTP2_BARLTBASE0 0x05478U
+#define IDT_SW_NTP2_BARUTBASE0 0x0547CU
+#define IDT_SW_NTP2_BARSETUP1 0x05480U
+#define IDT_SW_NTP2_BARLIMIT1 0x05484U
+#define IDT_SW_NTP2_BARLTBASE1 0x05488U
+#define IDT_SW_NTP2_BARUTBASE1 0x0548CU
+#define IDT_SW_NTP2_BARSETUP2 0x05490U
+#define IDT_SW_NTP2_BARLIMIT2 0x05494U
+#define IDT_SW_NTP2_BARLTBASE2 0x05498U
+#define IDT_SW_NTP2_BARUTBASE2 0x0549CU
+#define IDT_SW_NTP2_BARSETUP3 0x054A0U
+#define IDT_SW_NTP2_BARLIMIT3 0x054A4U
+#define IDT_SW_NTP2_BARLTBASE3 0x054A8U
+#define IDT_SW_NTP2_BARUTBASE3 0x054ACU
+#define IDT_SW_NTP2_BARSETUP4 0x054B0U
+#define IDT_SW_NTP2_BARLIMIT4 0x054B4U
+#define IDT_SW_NTP2_BARLTBASE4 0x054B8U
+#define IDT_SW_NTP2_BARUTBASE4 0x054BCU
+#define IDT_SW_NTP2_BARSETUP5 0x054C0U
+#define IDT_SW_NTP2_BARLIMIT5 0x054C4U
+#define IDT_SW_NTP2_BARLTBASE5 0x054C8U
+#define IDT_SW_NTP2_BARUTBASE5 0x054CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP4_PCIECMDSTS 0x09004U
+#define IDT_SW_NTP4_PCIELCTLSTS 0x09050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP4_NTCTL 0x09400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP4_BARSETUP0 0x09470U
+#define IDT_SW_NTP4_BARLIMIT0 0x09474U
+#define IDT_SW_NTP4_BARLTBASE0 0x09478U
+#define IDT_SW_NTP4_BARUTBASE0 0x0947CU
+#define IDT_SW_NTP4_BARSETUP1 0x09480U
+#define IDT_SW_NTP4_BARLIMIT1 0x09484U
+#define IDT_SW_NTP4_BARLTBASE1 0x09488U
+#define IDT_SW_NTP4_BARUTBASE1 0x0948CU
+#define IDT_SW_NTP4_BARSETUP2 0x09490U
+#define IDT_SW_NTP4_BARLIMIT2 0x09494U
+#define IDT_SW_NTP4_BARLTBASE2 0x09498U
+#define IDT_SW_NTP4_BARUTBASE2 0x0949CU
+#define IDT_SW_NTP4_BARSETUP3 0x094A0U
+#define IDT_SW_NTP4_BARLIMIT3 0x094A4U
+#define IDT_SW_NTP4_BARLTBASE3 0x094A8U
+#define IDT_SW_NTP4_BARUTBASE3 0x094ACU
+#define IDT_SW_NTP4_BARSETUP4 0x094B0U
+#define IDT_SW_NTP4_BARLIMIT4 0x094B4U
+#define IDT_SW_NTP4_BARLTBASE4 0x094B8U
+#define IDT_SW_NTP4_BARUTBASE4 0x094BCU
+#define IDT_SW_NTP4_BARSETUP5 0x094C0U
+#define IDT_SW_NTP4_BARLIMIT5 0x094C4U
+#define IDT_SW_NTP4_BARLTBASE5 0x094C8U
+#define IDT_SW_NTP4_BARUTBASE5 0x094CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP6_PCIECMDSTS 0x0D004U
+#define IDT_SW_NTP6_PCIELCTLSTS 0x0D050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP6_NTCTL 0x0D400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP6_BARSETUP0 0x0D470U
+#define IDT_SW_NTP6_BARLIMIT0 0x0D474U
+#define IDT_SW_NTP6_BARLTBASE0 0x0D478U
+#define IDT_SW_NTP6_BARUTBASE0 0x0D47CU
+#define IDT_SW_NTP6_BARSETUP1 0x0D480U
+#define IDT_SW_NTP6_BARLIMIT1 0x0D484U
+#define IDT_SW_NTP6_BARLTBASE1 0x0D488U
+#define IDT_SW_NTP6_BARUTBASE1 0x0D48CU
+#define IDT_SW_NTP6_BARSETUP2 0x0D490U
+#define IDT_SW_NTP6_BARLIMIT2 0x0D494U
+#define IDT_SW_NTP6_BARLTBASE2 0x0D498U
+#define IDT_SW_NTP6_BARUTBASE2 0x0D49CU
+#define IDT_SW_NTP6_BARSETUP3 0x0D4A0U
+#define IDT_SW_NTP6_BARLIMIT3 0x0D4A4U
+#define IDT_SW_NTP6_BARLTBASE3 0x0D4A8U
+#define IDT_SW_NTP6_BARUTBASE3 0x0D4ACU
+#define IDT_SW_NTP6_BARSETUP4 0x0D4B0U
+#define IDT_SW_NTP6_BARLIMIT4 0x0D4B4U
+#define IDT_SW_NTP6_BARLTBASE4 0x0D4B8U
+#define IDT_SW_NTP6_BARUTBASE4 0x0D4BCU
+#define IDT_SW_NTP6_BARSETUP5 0x0D4C0U
+#define IDT_SW_NTP6_BARLIMIT5 0x0D4C4U
+#define IDT_SW_NTP6_BARLTBASE5 0x0D4C8U
+#define IDT_SW_NTP6_BARUTBASE5 0x0D4CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP8_PCIECMDSTS 0x11004U
+#define IDT_SW_NTP8_PCIELCTLSTS 0x11050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP8_NTCTL 0x11400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP8_BARSETUP0 0x11470U
+#define IDT_SW_NTP8_BARLIMIT0 0x11474U
+#define IDT_SW_NTP8_BARLTBASE0 0x11478U
+#define IDT_SW_NTP8_BARUTBASE0 0x1147CU
+#define IDT_SW_NTP8_BARSETUP1 0x11480U
+#define IDT_SW_NTP8_BARLIMIT1 0x11484U
+#define IDT_SW_NTP8_BARLTBASE1 0x11488U
+#define IDT_SW_NTP8_BARUTBASE1 0x1148CU
+#define IDT_SW_NTP8_BARSETUP2 0x11490U
+#define IDT_SW_NTP8_BARLIMIT2 0x11494U
+#define IDT_SW_NTP8_BARLTBASE2 0x11498U
+#define IDT_SW_NTP8_BARUTBASE2 0x1149CU
+#define IDT_SW_NTP8_BARSETUP3 0x114A0U
+#define IDT_SW_NTP8_BARLIMIT3 0x114A4U
+#define IDT_SW_NTP8_BARLTBASE3 0x114A8U
+#define IDT_SW_NTP8_BARUTBASE3 0x114ACU
+#define IDT_SW_NTP8_BARSETUP4 0x114B0U
+#define IDT_SW_NTP8_BARLIMIT4 0x114B4U
+#define IDT_SW_NTP8_BARLTBASE4 0x114B8U
+#define IDT_SW_NTP8_BARUTBASE4 0x114BCU
+#define IDT_SW_NTP8_BARSETUP5 0x114C0U
+#define IDT_SW_NTP8_BARLIMIT5 0x114C4U
+#define IDT_SW_NTP8_BARLTBASE5 0x114C8U
+#define IDT_SW_NTP8_BARUTBASE5 0x114CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP12_PCIECMDSTS 0x19004U
+#define IDT_SW_NTP12_PCIELCTLSTS 0x19050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP12_NTCTL 0x19400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP12_BARSETUP0 0x19470U
+#define IDT_SW_NTP12_BARLIMIT0 0x19474U
+#define IDT_SW_NTP12_BARLTBASE0 0x19478U
+#define IDT_SW_NTP12_BARUTBASE0 0x1947CU
+#define IDT_SW_NTP12_BARSETUP1 0x19480U
+#define IDT_SW_NTP12_BARLIMIT1 0x19484U
+#define IDT_SW_NTP12_BARLTBASE1 0x19488U
+#define IDT_SW_NTP12_BARUTBASE1 0x1948CU
+#define IDT_SW_NTP12_BARSETUP2 0x19490U
+#define IDT_SW_NTP12_BARLIMIT2 0x19494U
+#define IDT_SW_NTP12_BARLTBASE2 0x19498U
+#define IDT_SW_NTP12_BARUTBASE2 0x1949CU
+#define IDT_SW_NTP12_BARSETUP3 0x194A0U
+#define IDT_SW_NTP12_BARLIMIT3 0x194A4U
+#define IDT_SW_NTP12_BARLTBASE3 0x194A8U
+#define IDT_SW_NTP12_BARUTBASE3 0x194ACU
+#define IDT_SW_NTP12_BARSETUP4 0x194B0U
+#define IDT_SW_NTP12_BARLIMIT4 0x194B4U
+#define IDT_SW_NTP12_BARLTBASE4 0x194B8U
+#define IDT_SW_NTP12_BARUTBASE4 0x194BCU
+#define IDT_SW_NTP12_BARSETUP5 0x194C0U
+#define IDT_SW_NTP12_BARLIMIT5 0x194C4U
+#define IDT_SW_NTP12_BARLTBASE5 0x194C8U
+#define IDT_SW_NTP12_BARUTBASE5 0x194CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP16_PCIECMDSTS 0x21004U
+#define IDT_SW_NTP16_PCIELCTLSTS 0x21050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP16_NTCTL 0x21400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP16_BARSETUP0 0x21470U
+#define IDT_SW_NTP16_BARLIMIT0 0x21474U
+#define IDT_SW_NTP16_BARLTBASE0 0x21478U
+#define IDT_SW_NTP16_BARUTBASE0 0x2147CU
+#define IDT_SW_NTP16_BARSETUP1 0x21480U
+#define IDT_SW_NTP16_BARLIMIT1 0x21484U
+#define IDT_SW_NTP16_BARLTBASE1 0x21488U
+#define IDT_SW_NTP16_BARUTBASE1 0x2148CU
+#define IDT_SW_NTP16_BARSETUP2 0x21490U
+#define IDT_SW_NTP16_BARLIMIT2 0x21494U
+#define IDT_SW_NTP16_BARLTBASE2 0x21498U
+#define IDT_SW_NTP16_BARUTBASE2 0x2149CU
+#define IDT_SW_NTP16_BARSETUP3 0x214A0U
+#define IDT_SW_NTP16_BARLIMIT3 0x214A4U
+#define IDT_SW_NTP16_BARLTBASE3 0x214A8U
+#define IDT_SW_NTP16_BARUTBASE3 0x214ACU
+#define IDT_SW_NTP16_BARSETUP4 0x214B0U
+#define IDT_SW_NTP16_BARLIMIT4 0x214B4U
+#define IDT_SW_NTP16_BARLTBASE4 0x214B8U
+#define IDT_SW_NTP16_BARUTBASE4 0x214BCU
+#define IDT_SW_NTP16_BARSETUP5 0x214C0U
+#define IDT_SW_NTP16_BARLIMIT5 0x214C4U
+#define IDT_SW_NTP16_BARLTBASE5 0x214C8U
+#define IDT_SW_NTP16_BARUTBASE5 0x214CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP20_PCIECMDSTS 0x29004U
+#define IDT_SW_NTP20_PCIELCTLSTS 0x29050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP20_NTCTL 0x29400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP20_BARSETUP0 0x29470U
+#define IDT_SW_NTP20_BARLIMIT0 0x29474U
+#define IDT_SW_NTP20_BARLTBASE0 0x29478U
+#define IDT_SW_NTP20_BARUTBASE0 0x2947CU
+#define IDT_SW_NTP20_BARSETUP1 0x29480U
+#define IDT_SW_NTP20_BARLIMIT1 0x29484U
+#define IDT_SW_NTP20_BARLTBASE1 0x29488U
+#define IDT_SW_NTP20_BARUTBASE1 0x2948CU
+#define IDT_SW_NTP20_BARSETUP2 0x29490U
+#define IDT_SW_NTP20_BARLIMIT2 0x29494U
+#define IDT_SW_NTP20_BARLTBASE2 0x29498U
+#define IDT_SW_NTP20_BARUTBASE2 0x2949CU
+#define IDT_SW_NTP20_BARSETUP3 0x294A0U
+#define IDT_SW_NTP20_BARLIMIT3 0x294A4U
+#define IDT_SW_NTP20_BARLTBASE3 0x294A8U
+#define IDT_SW_NTP20_BARUTBASE3 0x294ACU
+#define IDT_SW_NTP20_BARSETUP4 0x294B0U
+#define IDT_SW_NTP20_BARLIMIT4 0x294B4U
+#define IDT_SW_NTP20_BARLTBASE4 0x294B8U
+#define IDT_SW_NTP20_BARUTBASE4 0x294BCU
+#define IDT_SW_NTP20_BARSETUP5 0x294C0U
+#define IDT_SW_NTP20_BARLIMIT5 0x294C4U
+#define IDT_SW_NTP20_BARLTBASE5 0x294C8U
+#define IDT_SW_NTP20_BARUTBASE5 0x294CCU
+/* IDT PCIe-switch control register (DWORD) */
+#define IDT_SW_CTL 0x3E000U
+/* Boot Configuration Vector Status (DWORD) */
+#define IDT_SW_BCVSTS 0x3E004U
+/* Port Clocking Mode (DWORD) */
+#define IDT_SW_PCLKMODE 0x3E008U
+/* Reset Drain Delay (DWORD) */
+#define IDT_SW_RDRAINDELAY 0x3E080U
+/* Port Operating Mode Change Drain Delay (DWORD) */
+#define IDT_SW_POMCDELAY 0x3E084U
+/* Side Effect Delay (DWORD) */
+#define IDT_SW_SEDELAY 0x3E088U
+/* Upstream Secondary Bus Reset Delay (DWORD) */
+#define IDT_SW_SSBRDELAY 0x3E08CU
+/* Switch partition N Control/Status/Failover registers */
+#define IDT_SW_SWPART0CTL 0x3E100U
+#define IDT_SW_SWPART0STS 0x3E104U
+#define IDT_SW_SWPART0FCTL 0x3E108U
+#define IDT_SW_SWPART1CTL 0x3E120U
+#define IDT_SW_SWPART1STS 0x3E124U
+#define IDT_SW_SWPART1FCTL 0x3E128U
+#define IDT_SW_SWPART2CTL 0x3E140U
+#define IDT_SW_SWPART2STS 0x3E144U
+#define IDT_SW_SWPART2FCTL 0x3E148U
+#define IDT_SW_SWPART3CTL 0x3E160U
+#define IDT_SW_SWPART3STS 0x3E164U
+#define IDT_SW_SWPART3FCTL 0x3E168U
+#define IDT_SW_SWPART4CTL 0x3E180U
+#define IDT_SW_SWPART4STS 0x3E184U
+#define IDT_SW_SWPART4FCTL 0x3E188U
+#define IDT_SW_SWPART5CTL 0x3E1A0U
+#define IDT_SW_SWPART5STS 0x3E1A4U
+#define IDT_SW_SWPART5FCTL 0x3E1A8U
+#define IDT_SW_SWPART6CTL 0x3E1C0U
+#define IDT_SW_SWPART6STS 0x3E1C4U
+#define IDT_SW_SWPART6FCTL 0x3E1C8U
+#define IDT_SW_SWPART7CTL 0x3E1E0U
+#define IDT_SW_SWPART7STS 0x3E1E4U
+#define IDT_SW_SWPART7FCTL 0x3E1E8U
+/* Switch port N control and status registers */
+#define IDT_SW_SWPORT0CTL 0x3E200U
+#define IDT_SW_SWPORT0STS 0x3E204U
+#define IDT_SW_SWPORT0FCTL 0x3E208U
+#define IDT_SW_SWPORT2CTL 0x3E240U
+#define IDT_SW_SWPORT2STS 0x3E244U
+#define IDT_SW_SWPORT2FCTL 0x3E248U
+#define IDT_SW_SWPORT4CTL 0x3E280U
+#define IDT_SW_SWPORT4STS 0x3E284U
+#define IDT_SW_SWPORT4FCTL 0x3E288U
+#define IDT_SW_SWPORT6CTL 0x3E2C0U
+#define IDT_SW_SWPORT6STS 0x3E2C4U
+#define IDT_SW_SWPORT6FCTL 0x3E2C8U
+#define IDT_SW_SWPORT8CTL 0x3E300U
+#define IDT_SW_SWPORT8STS 0x3E304U
+#define IDT_SW_SWPORT8FCTL 0x3E308U
+#define IDT_SW_SWPORT12CTL 0x3E380U
+#define IDT_SW_SWPORT12STS 0x3E384U
+#define IDT_SW_SWPORT12FCTL 0x3E388U
+#define IDT_SW_SWPORT16CTL 0x3E400U
+#define IDT_SW_SWPORT16STS 0x3E404U
+#define IDT_SW_SWPORT16FCTL 0x3E408U
+#define IDT_SW_SWPORT20CTL 0x3E480U
+#define IDT_SW_SWPORT20STS 0x3E484U
+#define IDT_SW_SWPORT20FCTL 0x3E488U
+/* Switch Event registers */
+/* Switch Event Status/Mask/Partition mask (DWORD) */
+#define IDT_SW_SESTS 0x3EC00U
+#define IDT_SW_SEMSK 0x3EC04U
+#define IDT_SW_SEPMSK 0x3EC08U
+/* Switch Event Link Up/Down Status/Mask (DWORD) */
+#define IDT_SW_SELINKUPSTS 0x3EC0CU
+#define IDT_SW_SELINKUPMSK 0x3EC10U
+#define IDT_SW_SELINKDNSTS 0x3EC14U
+#define IDT_SW_SELINKDNMSK 0x3EC18U
+/* Switch Event Fundamental Reset Status/Mask (DWORD) */
+#define IDT_SW_SEFRSTSTS 0x3EC1CU
+#define IDT_SW_SEFRSTMSK 0x3EC20U
+/* Switch Event Hot Reset Status/Mask (DWORD) */
+#define IDT_SW_SEHRSTSTS 0x3EC24U
+#define IDT_SW_SEHRSTMSK 0x3EC28U
+/* Switch Event Failover Mask (DWORD) */
+#define IDT_SW_SEFOVRMSK 0x3EC2CU
+/* Switch Event Global Signal Status/Mask (DWORD) */
+#define IDT_SW_SEGSIGSTS 0x3EC30U
+#define IDT_SW_SEGSIGMSK 0x3EC34U
+/* NT Global Doorbell Status (DWORD) */
+#define IDT_SW_GDBELLSTS 0x3EC3CU
+/* Switch partition N message M control (msgs routing table) (DWORD) */
+#define IDT_SW_SWP0MSGCTL0 0x3EE00U
+#define IDT_SW_SWP1MSGCTL0 0x3EE04U
+#define IDT_SW_SWP2MSGCTL0 0x3EE08U
+#define IDT_SW_SWP3MSGCTL0 0x3EE0CU
+#define IDT_SW_SWP4MSGCTL0 0x3EE10U
+#define IDT_SW_SWP5MSGCTL0 0x3EE14U
+#define IDT_SW_SWP6MSGCTL0 0x3EE18U
+#define IDT_SW_SWP7MSGCTL0 0x3EE1CU
+#define IDT_SW_SWP0MSGCTL1 0x3EE20U
+#define IDT_SW_SWP1MSGCTL1 0x3EE24U
+#define IDT_SW_SWP2MSGCTL1 0x3EE28U
+#define IDT_SW_SWP3MSGCTL1 0x3EE2CU
+#define IDT_SW_SWP4MSGCTL1 0x3EE30U
+#define IDT_SW_SWP5MSGCTL1 0x3EE34U
+#define IDT_SW_SWP6MSGCTL1 0x3EE38U
+#define IDT_SW_SWP7MSGCTL1 0x3EE3CU
+#define IDT_SW_SWP0MSGCTL2 0x3EE40U
+#define IDT_SW_SWP1MSGCTL2 0x3EE44U
+#define IDT_SW_SWP2MSGCTL2 0x3EE48U
+#define IDT_SW_SWP3MSGCTL2 0x3EE4CU
+#define IDT_SW_SWP4MSGCTL2 0x3EE50U
+#define IDT_SW_SWP5MSGCTL2 0x3EE54U
+#define IDT_SW_SWP6MSGCTL2 0x3EE58U
+#define IDT_SW_SWP7MSGCTL2 0x3EE5CU
+#define IDT_SW_SWP0MSGCTL3 0x3EE60U
+#define IDT_SW_SWP1MSGCTL3 0x3EE64U
+#define IDT_SW_SWP2MSGCTL3 0x3EE68U
+#define IDT_SW_SWP3MSGCTL3 0x3EE6CU
+#define IDT_SW_SWP4MSGCTL3 0x3EE70U
+#define IDT_SW_SWP5MSGCTL3 0x3EE74U
+#define IDT_SW_SWP6MSGCTL3 0x3EE78U
+#define IDT_SW_SWP7MSGCTL3 0x3EE7CU
+/* SMBus Status and Control registers (DWORD) */
+#define IDT_SW_SMBUSSTS 0x3F188U
+#define IDT_SW_SMBUSCTL 0x3F18CU
+/* Serial EEPROM Interface (DWORD) */
+#define IDT_SW_EEPROMINTF 0x3F190U
+/* MBus I/O Expander Address N (DWORD) */
+#define IDT_SW_IOEXPADDR0 0x3F198U
+#define IDT_SW_IOEXPADDR1 0x3F19CU
+#define IDT_SW_IOEXPADDR2 0x3F1A0U
+#define IDT_SW_IOEXPADDR3 0x3F1A4U
+#define IDT_SW_IOEXPADDR4 0x3F1A8U
+#define IDT_SW_IOEXPADDR5 0x3F1ACU
+/* General Purpose Events Control and Status registers (DWORD) */
+#define IDT_SW_GPECTL 0x3F1B0U
+#define IDT_SW_GPESTS 0x3F1B4U
+/* Temperature sensor Control/Status/Alarm/Adjustment/Slope registers */
+#define IDT_SW_TMPCTL 0x3F1D4U
+#define IDT_SW_TMPSTS 0x3F1D8U
+#define IDT_SW_TMPALARM 0x3F1DCU
+#define IDT_SW_TMPADJ 0x3F1E0U
+#define IDT_SW_TSSLOPE 0x3F1E4U
+/* SMBus Configuration Block header log (DWORD) */
+#define IDT_SW_SMBUSCBHL 0x3F1E8U
+
+/*
+ * Common registers related constants
+ * @IDT_REG_ALIGN: Registers alignment used in the driver
+ * @IDT_REG_PCI_MAX: Maximum PCI configuration space register address
+ * @IDT_REG_SW_MAX: Maximum global register address
+ */
+#define IDT_REG_ALIGN 4
+#define IDT_REG_PCI_MAX 0x00FFFU
+#define IDT_REG_SW_MAX 0x3FFFFU
+
+/*
+ * PCICMDSTS register fields related constants
+ * @IDT_PCICMDSTS_IOAE: I/O access enable
+ * @IDT_PCICMDSTS_MAE: Memory access enable
+ * @IDT_PCICMDSTS_BME: Bus master enable
+ */
+#define IDT_PCICMDSTS_IOAE 0x00000001U
+#define IDT_PCICMDSTS_MAE 0x00000002U
+#define IDT_PCICMDSTS_BME 0x00000004U
+
+/*
+ * PCIEDCAP register fields related constants
+ * @IDT_PCIEDCAP_MPAYLOAD_MASK: Maximum payload size mask
+ * @IDT_PCIEDCAP_MPAYLOAD_FLD: Maximum payload size field offset
+ * @IDT_PCIEDCAP_MPAYLOAD_S128: Max supported payload size of 128 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S256: Max supported payload size of 256 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S512: Max supported payload size of 512 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S1024: Max supported payload size of 1024 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S2048: Max supported payload size of 2048 bytes
+ */
+#define IDT_PCIEDCAP_MPAYLOAD_MASK 0x00000007U
+#define IDT_PCIEDCAP_MPAYLOAD_FLD 0
+#define IDT_PCIEDCAP_MPAYLOAD_S128 0x00000000U
+#define IDT_PCIEDCAP_MPAYLOAD_S256 0x00000001U
+#define IDT_PCIEDCAP_MPAYLOAD_S512 0x00000002U
+#define IDT_PCIEDCAP_MPAYLOAD_S1024 0x00000003U
+#define IDT_PCIEDCAP_MPAYLOAD_S2048 0x00000004U
+
+/*
+ * PCIEDCTLSTS registers fields related constants
+ * @IDT_PCIEDCTLSTS_MPS_MASK: Maximum payload size mask
+ * @IDT_PCIEDCTLSTS_MPS_FLD: MPS field offset
+ * @IDT_PCIEDCTLSTS_MPS_S128: Max payload size of 128 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S256: Max payload size of 256 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S512: Max payload size of 512 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S1024: Max payload size of 1024 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S2048: Max payload size of 2048 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S4096: Max payload size of 4096 bytes
+ */
+#define IDT_PCIEDCTLSTS_MPS_MASK 0x000000E0U
+#define IDT_PCIEDCTLSTS_MPS_FLD 5
+#define IDT_PCIEDCTLSTS_MPS_S128 0x00000000U
+#define IDT_PCIEDCTLSTS_MPS_S256 0x00000020U
+#define IDT_PCIEDCTLSTS_MPS_S512 0x00000040U
+#define IDT_PCIEDCTLSTS_MPS_S1024 0x00000060U
+#define IDT_PCIEDCTLSTS_MPS_S2048 0x00000080U
+#define IDT_PCIEDCTLSTS_MPS_S4096 0x000000A0U
+
+/*
+ * PCIELCAP register fields related constants
+ * @IDT_PCIELCAP_PORTNUM_MASK: Port number field mask
+ * @IDT_PCIELCAP_PORTNUM_FLD: Port number field offset
+ */
+#define IDT_PCIELCAP_PORTNUM_MASK 0xFF000000U
+#define IDT_PCIELCAP_PORTNUM_FLD 24
+
+/*
+ * PCIELCTLSTS registers fields related constants
+ * @IDT_PCIELCTLSTS_CLS_MASK: Current link speed mask
+ * @IDT_PCIELCTLSTS_CLS_FLD: Current link speed field offset
+ * @IDT_PCIELCTLSTS_NLW_MASK: Negotiated link width mask
+ * @IDT_PCIELCTLSTS_NLW_FLD: Negotiated link width field offset
+ * @IDT_PCIELCTLSTS_SCLK_COM: Common slot clock configuration
+ */
+#define IDT_PCIELCTLSTS_CLS_MASK 0x000F0000U
+#define IDT_PCIELCTLSTS_CLS_FLD 16
+#define IDT_PCIELCTLSTS_NLW_MASK 0x03F00000U
+#define IDT_PCIELCTLSTS_NLW_FLD 20
+#define IDT_PCIELCTLSTS_SCLK_COM 0x10000000U
+
+/*
+ * NTCTL register fields related constants
+ * @IDT_NTCTL_IDPROTDIS: ID Protection check disable (disable MTBL)
+ * @IDT_NTCTL_CPEN: Completion enable
+ * @IDT_NTCTL_RNS: Request no snoop processing (if MTBL disabled)
+ * @IDT_NTCTL_ATP: Address type processing (if MTBL disabled)
+ */
+#define IDT_NTCTL_IDPROTDIS 0x00000001U
+#define IDT_NTCTL_CPEN 0x00000002U
+#define IDT_NTCTL_RNS 0x00000004U
+#define IDT_NTCTL_ATP 0x00000008U
+
+/*
+ * NTINTSTS register fields related constants
+ * @IDT_NTINTSTS_MSG: Message interrupt bit
+ * @IDT_NTINTSTS_DBELL: Doorbell interrupt bit
+ * @IDT_NTINTSTS_SEVENT: Switch Event interrupt bit
+ * @IDT_NTINTSTS_TMPSENSOR: Temperature sensor interrupt bit
+ */
+#define IDT_NTINTSTS_MSG 0x00000001U
+#define IDT_NTINTSTS_DBELL 0x00000002U
+#define IDT_NTINTSTS_SEVENT 0x00000008U
+#define IDT_NTINTSTS_TMPSENSOR 0x00000080U
+
+/*
+ * NTINTMSK register fields related constants
+ * @IDT_NTINTMSK_MSG: Message interrupt mask bit
+ * @IDT_NTINTMSK_DBELL: Doorbell interrupt mask bit
+ * @IDT_NTINTMSK_SEVENT: Switch Event interrupt mask bit
+ * @IDT_NTINTMSK_TMPSENSOR: Temperature sensor interrupt mask bit
+ * @IDT_NTINTMSK_ALL: All the useful interrupts mask
+ */
+#define IDT_NTINTMSK_MSG 0x00000001U
+#define IDT_NTINTMSK_DBELL 0x00000002U
+#define IDT_NTINTMSK_SEVENT 0x00000008U
+#define IDT_NTINTMSK_TMPSENSOR 0x00000080U
+#define IDT_NTINTMSK_ALL \
+ (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | \
+ IDT_NTINTMSK_SEVENT | IDT_NTINTMSK_TMPSENSOR)
+
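+/*
+ * Usage sketch (illustrative only, reusing the driver idt_nt_read/write
+ * helpers): an interrupt source is enabled by clearing its mask bit, e.g.
+ * to leave just the doorbell events unmasked:
+ *
+ *	u32 mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
+ *	idt_nt_write(ndev, IDT_NT_NTINTMSK, mask & ~IDT_NTINTMSK_DBELL);
+ */
+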
+/*
+ * NTGSIGNAL register fields related constants
+ * @IDT_NTGSIGNAL_SET: Set global signal of the local partition
+ */
+#define IDT_NTGSIGNAL_SET 0x00000001U
+
+/*
+ * BARSETUP register fields related constants
+ * @IDT_BARSETUP_TYPE_MASK: Mask of the TYPE field
+ * @IDT_BARSETUP_TYPE_FLD: TYPE field offset
+ * @IDT_BARSETUP_TYPE_32: 32-bit addressing BAR
+ * @IDT_BARSETUP_TYPE_64: 64-bit addressing BAR
+ * @IDT_BARSETUP_PREF: Value of the BAR prefetchable field
+ * @IDT_BARSETUP_SIZE_MASK: Mask of the SIZE field
+ * @IDT_BARSETUP_SIZE_FLD: SIZE field offset
+ * @IDT_BARSETUP_SIZE_CFG: SIZE field value in case of config space MODE
+ * @IDT_BARSETUP_MODE_CFG: Configuration space BAR mode
+ * @IDT_BARSETUP_ATRAN_MASK: ATRAN field mask
+ * @IDT_BARSETUP_ATRAN_FLD: ATRAN field offset
+ * @IDT_BARSETUP_ATRAN_DIR: Direct address translation memory window
+ * @IDT_BARSETUP_ATRAN_LUT12: 12-entry lookup table
+ * @IDT_BARSETUP_ATRAN_LUT24: 24-entry lookup table
+ * @IDT_BARSETUP_TPART_MASK: TPART field mask
+ * @IDT_BARSETUP_TPART_FLD: TPART field offset
+ * @IDT_BARSETUP_EN: BAR enable bit
+ */
+#define IDT_BARSETUP_TYPE_MASK 0x00000006U
+#define IDT_BARSETUP_TYPE_FLD 0
+#define IDT_BARSETUP_TYPE_32 0x00000000U
+#define IDT_BARSETUP_TYPE_64 0x00000004U
+#define IDT_BARSETUP_PREF 0x00000008U
+#define IDT_BARSETUP_SIZE_MASK 0x000003F0U
+#define IDT_BARSETUP_SIZE_FLD 4
+#define IDT_BARSETUP_SIZE_CFG 0x000000C0U
+#define IDT_BARSETUP_MODE_CFG 0x00000400U
+#define IDT_BARSETUP_ATRAN_MASK 0x00001800U
+#define IDT_BARSETUP_ATRAN_FLD 11
+#define IDT_BARSETUP_ATRAN_DIR 0x00000000U
+#define IDT_BARSETUP_ATRAN_LUT12 0x00000800U
+#define IDT_BARSETUP_ATRAN_LUT24 0x00001000U
+#define IDT_BARSETUP_TPART_MASK 0x0000E000U
+#define IDT_BARSETUP_TPART_FLD 13
+#define IDT_BARSETUP_EN 0x80000000U
+
+/*
+ * NTMTBLDATA register fields related constants
+ * @IDT_NTMTBLDATA_VALID: Set the MTBL entry being valid
+ * @IDT_NTMTBLDATA_REQID_MASK: Bus:Device:Function field mask
+ * @IDT_NTMTBLDATA_REQID_FLD: Bus:Device:Function field offset
+ * @IDT_NTMTBLDATA_PART_MASK: Partition field mask
+ * @IDT_NTMTBLDATA_PART_FLD: Partition field offset
+ * @IDT_NTMTBLDATA_ATP_TRANS: Enable AT field translation on request TLPs
+ * @IDT_NTMTBLDATA_CNS_INV: Enable No Snoop attribute inversion of
+ * Completion TLPs
+ * @IDT_NTMTBLDATA_RNS_INV: Enable No Snoop attribute inversion of
+ * Request TLPs
+ */
+#define IDT_NTMTBLDATA_VALID 0x00000001U
+#define IDT_NTMTBLDATA_REQID_MASK 0x0001FFFEU
+#define IDT_NTMTBLDATA_REQID_FLD 1
+#define IDT_NTMTBLDATA_PART_MASK 0x000E0000U
+#define IDT_NTMTBLDATA_PART_FLD 17
+#define IDT_NTMTBLDATA_ATP_TRANS 0x20000000U
+#define IDT_NTMTBLDATA_CNS_INV 0x40000000U
+#define IDT_NTMTBLDATA_RNS_INV 0x80000000U
+
+/*
+ * REQIDCAP register fields related constants
+ * @IDT_REQIDCAP_REQID_MASK: Request ID field mask
+ * @IDT_REQIDCAP_REQID_FLD: Request ID field offset
+ */
+#define IDT_REQIDCAP_REQID_MASK 0x0000FFFFU
+#define IDT_REQIDCAP_REQID_FLD 0
+
+/*
+ * LUTOFFSET register fields related constants
+ * @IDT_LUTOFFSET_INDEX_MASK: Lookup table index field mask
+ * @IDT_LUTOFFSET_INDEX_FLD: Lookup table index field offset
+ * @IDT_LUTOFFSET_BAR_MASK: Lookup table BAR select field mask
+ * @IDT_LUTOFFSET_BAR_FLD: Lookup table BAR select field offset
+ */
+#define IDT_LUTOFFSET_INDEX_MASK 0x0000001FU
+#define IDT_LUTOFFSET_INDEX_FLD 0
+#define IDT_LUTOFFSET_BAR_MASK 0x00000700U
+#define IDT_LUTOFFSET_BAR_FLD 8
+
+/*
+ * LUTUDATA register fields related constants
+ * @IDT_LUTUDATA_PART_MASK: Partition field mask
+ * @IDT_LUTUDATA_PART_FLD: Partition field offset
+ * @IDT_LUTUDATA_VALID: Lookup table entry valid bit
+ */
+#define IDT_LUTUDATA_PART_MASK 0x0000000FU
+#define IDT_LUTUDATA_PART_FLD 0
+#define IDT_LUTUDATA_VALID 0x80000000U
+
+/*
+ * SWPARTxSTS register fields related constants
+ * @IDT_SWPARTxSTS_SCI: Switch partition state change initiated
+ * @IDT_SWPARTxSTS_SCC: Switch partition state change completed
+ * @IDT_SWPARTxSTS_STATE_MASK: Switch partition state mask
+ * @IDT_SWPARTxSTS_STATE_FLD: Switch partition state field offset
+ * @IDT_SWPARTxSTS_STATE_DIS: Switch partition disabled
+ * @IDT_SWPARTxSTS_STATE_ACT: Switch partition enabled
+ * @IDT_SWPARTxSTS_STATE_RES: Switch partition in reset
+ * @IDT_SWPARTxSTS_US: Switch partition has upstream port
+ * @IDT_SWPARTxSTS_USID_MASK: Switch partition upstream port ID mask
+ * @IDT_SWPARTxSTS_USID_FLD: Switch partition upstream port ID field offset
+ * @IDT_SWPARTxSTS_NT: Upstream port has NT function
+ * @IDT_SWPARTxSTS_DMA: Upstream port has DMA function
+ */
+#define IDT_SWPARTxSTS_SCI 0x00000001U
+#define IDT_SWPARTxSTS_SCC 0x00000002U
+#define IDT_SWPARTxSTS_STATE_MASK 0x00000060U
+#define IDT_SWPARTxSTS_STATE_FLD 5
+#define IDT_SWPARTxSTS_STATE_DIS 0x00000000U
+#define IDT_SWPARTxSTS_STATE_ACT 0x00000020U
+#define IDT_SWPARTxSTS_STATE_RES 0x00000060U
+#define IDT_SWPARTxSTS_US 0x00000100U
+#define IDT_SWPARTxSTS_USID_MASK 0x00003E00U
+#define IDT_SWPARTxSTS_USID_FLD 9
+#define IDT_SWPARTxSTS_NT 0x00004000U
+#define IDT_SWPARTxSTS_DMA 0x00008000U
+
+/*
+ * SWPORTxSTS register fields related constants
+ * @IDT_SWPORTxSTS_OMCI: Operation mode change initiated
+ * @IDT_SWPORTxSTS_OMCC: Operation mode change completed
+ * @IDT_SWPORTxSTS_LINKUP: Link up status
+ * @IDT_SWPORTxSTS_DS: Port lanes behave as downstream lanes
+ * @IDT_SWPORTxSTS_MODE_MASK: Port mode field mask
+ * @IDT_SWPORTxSTS_MODE_FLD: Port mode field offset
+ * @IDT_SWPORTxSTS_MODE_DIS: Port mode - disabled
+ * @IDT_SWPORTxSTS_MODE_DS: Port mode - downstream switch port
+ * @IDT_SWPORTxSTS_MODE_US: Port mode - upstream switch port
+ * @IDT_SWPORTxSTS_MODE_NT: Port mode - NT function
+ * @IDT_SWPORTxSTS_MODE_USNT: Port mode - upstream switch port with NTB
+ * @IDT_SWPORTxSTS_MODE_UNAT: Port mode - unattached
+ * @IDT_SWPORTxSTS_MODE_USDMA: Port mode - upstream switch port with DMA
+ * @IDT_SWPORTxSTS_MODE_USNTDMA:Port mode - upstream port with NTB and DMA
+ * @IDT_SWPORTxSTS_MODE_NTDMA: Port mode - NT function with DMA
+ * @IDT_SWPORTxSTS_SWPART_MASK: Port partition field mask
+ * @IDT_SWPORTxSTS_SWPART_FLD: Port partition field offset
+ * @IDT_SWPORTxSTS_DEVNUM_MASK: Port device number field mask
+ * @IDT_SWPORTxSTS_DEVNUM_FLD: Port device number field offset
+ */
+#define IDT_SWPORTxSTS_OMCI 0x00000001U
+#define IDT_SWPORTxSTS_OMCC 0x00000002U
+#define IDT_SWPORTxSTS_LINKUP 0x00000010U
+#define IDT_SWPORTxSTS_DS 0x00000020U
+#define IDT_SWPORTxSTS_MODE_MASK 0x000003C0U
+#define IDT_SWPORTxSTS_MODE_FLD 6
+#define IDT_SWPORTxSTS_MODE_DIS 0x00000000U
+#define IDT_SWPORTxSTS_MODE_DS 0x00000040U
+#define IDT_SWPORTxSTS_MODE_US 0x00000080U
+#define IDT_SWPORTxSTS_MODE_NT 0x000000C0U
+#define IDT_SWPORTxSTS_MODE_USNT 0x00000100U
+#define IDT_SWPORTxSTS_MODE_UNAT 0x00000140U
+#define IDT_SWPORTxSTS_MODE_USDMA 0x00000180U
+#define IDT_SWPORTxSTS_MODE_USNTDMA 0x000001C0U
+#define IDT_SWPORTxSTS_MODE_NTDMA 0x00000200U
+#define IDT_SWPORTxSTS_SWPART_MASK 0x00001C00U
+#define IDT_SWPORTxSTS_SWPART_FLD 10
+#define IDT_SWPORTxSTS_DEVNUM_MASK 0x001F0000U
+#define IDT_SWPORTxSTS_DEVNUM_FLD 16
+
+/*
+ * SEMSK register fields related constants
+ * @IDT_SEMSK_LINKUP: Link Up event mask bit
+ * @IDT_SEMSK_LINKDN: Link Down event mask bit
+ * @IDT_SEMSK_GSIGNAL: Global Signal event mask bit
+ */
+#define IDT_SEMSK_LINKUP 0x00000001U
+#define IDT_SEMSK_LINKDN 0x00000002U
+#define IDT_SEMSK_GSIGNAL 0x00000020U
+
+/*
+ * SWPxMSGCTL register fields related constants
+ * @IDT_SWPxMSGCTL_REG_MASK: Register select field mask
+ * @IDT_SWPxMSGCTL_REG_FLD: Register select field offset
+ * @IDT_SWPxMSGCTL_PART_MASK: Partition select field mask
+ * @IDT_SWPxMSGCTL_PART_FLD: Partition select field offset
+ */
+#define IDT_SWPxMSGCTL_REG_MASK 0x00000003U
+#define IDT_SWPxMSGCTL_REG_FLD 0
+#define IDT_SWPxMSGCTL_PART_MASK 0x00000070U
+#define IDT_SWPxMSGCTL_PART_FLD 4
+
+/*
+ * TMPSTS register fields related constants
+ * @IDT_TMPSTS_TEMP_MASK: Current temperature field mask
+ * @IDT_TMPSTS_TEMP_FLD: Current temperature field offset
+ */
+#define IDT_TMPSTS_TEMP_MASK 0x000000FFU
+#define IDT_TMPSTS_TEMP_FLD 0
+
+/*
+ * Helper macros to get/set the corresponding field value
+ * @GET_FIELD: Retrieve the value of the corresponding field
+ * @SET_FIELD: Set the specified field up
+ * @IS_FLD_SET: Check whether a field is set to the specified value
+ */
+#define GET_FIELD(field, data) \
+ (((u32)(data) & IDT_ ##field## _MASK) >> IDT_ ##field## _FLD)
+#define SET_FIELD(field, data, value) \
+ (((u32)(data) & ~IDT_ ##field## _MASK) | \
+ ((u32)(value) << IDT_ ##field## _FLD))
+#define IS_FLD_SET(field, data, value) \
+ (((u32)(data) & IDT_ ##field## _MASK) == IDT_ ##field## _ ##value)
+
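+/*
+ * Usage sketch (illustrative only): the macros concatenate the field name
+ * with the _MASK/_FLD suffixes, so the raw switch temperature and a port
+ * mode check look as follows:
+ *
+ *	temp = GET_FIELD(TMPSTS_TEMP, idt_sw_read(ndev, IDT_SW_TMPSTS));
+ *	if (IS_FLD_SET(SWPORTxSTS_MODE, data, NT))
+ *		... the port is configured as an NT function ...
+ */
+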
+/*
+ * Useful registers masks:
+ * @IDT_DBELL_MASK: Doorbell bits mask
+ * @IDT_OUTMSG_MASK: Out messages status bits mask
+ * @IDT_INMSG_MASK: In messages status bits mask
+ * @IDT_MSG_MASK: Any message status bits mask
+ */
+#define IDT_DBELL_MASK ((u32)0xFFFFFFFFU)
+#define IDT_OUTMSG_MASK ((u32)0x0000000FU)
+#define IDT_INMSG_MASK ((u32)0x000F0000U)
+#define IDT_MSG_MASK (IDT_INMSG_MASK | IDT_OUTMSG_MASK)
+
+/*
+ * Number of IDT NTB resources:
+ * @IDT_MSG_CNT: Number of Message registers
+ * @IDT_BAR_CNT: Number of BARs of each port
+ * @IDT_MTBL_ENTRY_CNT: Number of mapping table entries
+ */
+#define IDT_MSG_CNT 4
+#define IDT_BAR_CNT 6
+#define IDT_MTBL_ENTRY_CNT 64
+
+/*
+ * General IDT PCIe-switch constants
+ * @IDT_MAX_NR_PORTS: Maximum number of ports per IDT PCIe-switch
+ * @IDT_MAX_NR_PARTS: Maximum number of partitions per IDT PCIe-switch
+ * @IDT_MAX_NR_PEERS: Maximum number of NT-peers per IDT PCIe-switch
+ * @IDT_MAX_NR_MWS: Maximum number of Memory Windows
+ * @IDT_PCIE_REGSIZE: Size of the registers in bytes
+ * @IDT_TRANS_ALIGN: Alignment of translated base address
+ * @IDT_DIR_SIZE_ALIGN: Alignment of the size setting for direct translated
+ * MWs. Even though the lower 10 bits are reserved, they
+ * are treated by IDT as ones, so effectively there is no
+ * size alignment limit for the DIR address translation.
+ */
+#define IDT_MAX_NR_PORTS 24
+#define IDT_MAX_NR_PARTS 8
+#define IDT_MAX_NR_PEERS 8
+#define IDT_MAX_NR_MWS 29
+#define IDT_PCIE_REGSIZE 4
+#define IDT_TRANS_ALIGN 4
+#define IDT_DIR_SIZE_ALIGN 1
+
+/*
+ * IDT Memory Window types. Depending on the device settings, IDT supports
+ * Direct Address Translation MW registers and Lookup Table registers
+ * @IDT_MW_DIR: Direct address translation
+ * @IDT_MW_LUT12: 12-entry lookup table entry
+ * @IDT_MW_LUT24: 24-entry lookup table entry
+ *
+ * NOTE: These values exactly match the ones of the BARSETUP ATRAN field
+ */
+enum idt_mw_type {
+ IDT_MW_DIR = 0x0,
+ IDT_MW_LUT12 = 0x1,
+ IDT_MW_LUT24 = 0x2
+};
+
+/*
+ * IDT PCIe-switch model private data
+ * @name: Device name
+ * @port_cnt: Total number of NT endpoint ports
+ * @ports: Port ids
+ */
+struct idt_89hpes_cfg {
+ char *name;
+ unsigned char port_cnt;
+ unsigned char ports[];
+};
+
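For illustration, a hypothetical table entry for a two-port device could look as follows (the device name and port numbers here are invented, not taken from the patch):

	static struct idt_89hpes_cfg idt_example_cfg = {
		.name = "89HPESxNTx (example)",
		.port_cnt = 2,
		.ports = {0, 8}
	};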
+/*
+ * Memory window configuration structure
+ * @type: Type of the memory window (direct address translation or lookup
+ * table)
+ *
+ * @bar: PCIe BAR the memory window belongs to
+ * @idx: Index of the memory window within the BAR
+ *
+ * @addr_align: Alignment of translated address
+ * @size_align: Alignment of memory window size
+ * @size_max: Maximum size of memory window
+ */
+struct idt_mw_cfg {
+ enum idt_mw_type type;
+
+ unsigned char bar;
+ unsigned char idx;
+
+ u64 addr_align;
+ u64 size_align;
+ u64 size_max;
+};
+
+/*
+ * Description structure of peer IDT NT-functions:
+ * @port: NT-function port
+ * @part: NT-function partition
+ *
+ * @mw_cnt: Number of memory windows supported by NT-function
+ * @mws: Array of memory windows descriptors
+ */
+struct idt_ntb_peer {
+ unsigned char port;
+ unsigned char part;
+
+ unsigned char mw_cnt;
+ struct idt_mw_cfg *mws;
+};
+
+/*
+ * Description structure of local IDT NT-function:
+ * @ntb: Linux NTB-device description structure
+ * @swcfg: Pointer to the structure of local IDT PCIe-switch
+ * specific configurations
+ *
+ * @port: Local NT-function port
+ * @part: Local NT-function partition
+ *
+ * @peer_cnt: Number of peers with activated NTB-function
+ * @peers: Array of peer descriptor structures
+ * @port_idx_map: Map of port number -> peer index
+ * @part_idx_map: Map of partition number -> peer index
+ *
+ * @mtbl_lock: Mapping table access lock
+ *
+ * @mw_cnt: Number of memory windows supported by NT-function
+ * @mws: Array of memory windows descriptors
+ * @lut_lock: Lookup table access lock
+ *
+ * @msg_locks: Message register access locks
+ *
+ * @cfgspc: Virtual address of the memory mapped configuration
+ * space of the NT-function
+ * @db_mask_lock: Doorbell mask register lock
+ * @msg_mask_lock: Message mask register lock
+ * @gasa_lock: GASA registers access lock
+ *
+ * @dbgfs_info: DebugFS info node
+ */
+struct idt_ntb_dev {
+ struct ntb_dev ntb;
+ struct idt_89hpes_cfg *swcfg;
+
+ unsigned char port;
+ unsigned char part;
+
+ unsigned char peer_cnt;
+ struct idt_ntb_peer peers[IDT_MAX_NR_PEERS];
+ char port_idx_map[IDT_MAX_NR_PORTS];
+ char part_idx_map[IDT_MAX_NR_PARTS];
+
+ spinlock_t mtbl_lock;
+
+ unsigned char mw_cnt;
+ struct idt_mw_cfg *mws;
+ spinlock_t lut_lock;
+
+ spinlock_t msg_locks[IDT_MSG_CNT];
+
+ void __iomem *cfgspc;
+ spinlock_t db_mask_lock;
+ spinlock_t msg_mask_lock;
+ spinlock_t gasa_lock;
+
+ struct dentry *dbgfs_info;
+};
+#define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb)
+
+/*
+ * Descriptor of the IDT PCIe-switch BAR resources
+ * @setup: BAR setup register
+ * @limit: BAR limit register
+ * @ltbase: Lower translated base address
+ * @utbase: Upper translated base address
+ */
+struct idt_ntb_bar {
+ unsigned int setup;
+ unsigned int limit;
+ unsigned int ltbase;
+ unsigned int utbase;
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch message resources
+ * @in: Inbound message register
+ * @out: Outbound message register
+ * @src: Source of inbound message register
+ */
+struct idt_ntb_msg {
+ unsigned int in;
+ unsigned int out;
+ unsigned int src;
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch NT-function specific parameters in the
+ * PCI Configuration Space
+ * @bars: BARs related registers
+ * @msgs: Messaging related registers
+ */
+struct idt_ntb_regs {
+ struct idt_ntb_bar bars[IDT_BAR_CNT];
+ struct idt_ntb_msg msgs[IDT_MSG_CNT];
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch port specific parameters in the
+ * Global Configuration Space
+ * @pcicmdsts: PCI command/status register
+ * @pcielctlsts: PCIe link control/status
+ * @ntctl: NT-function control register
+ *
+ * @ctl: Port control register
+ * @sts: Port status register
+ *
+ * @bars: BARs related registers
+ */
+struct idt_ntb_port {
+ unsigned int pcicmdsts;
+ unsigned int pcielctlsts;
+ unsigned int ntctl;
+
+ unsigned int ctl;
+ unsigned int sts;
+
+ struct idt_ntb_bar bars[IDT_BAR_CNT];
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch partition specific parameters.
+ * @ctl: Partition control register in the Global Address Space
+ * @sts: Partition status register in the Global Address Space
+ * @msgctl: Messages control registers
+ */
+struct idt_ntb_part {
+ unsigned int ctl;
+ unsigned int sts;
+ unsigned int msgctl[IDT_MSG_CNT];
+};
+
+#endif /* NTB_HW_IDT_H */
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 7b3b6fd63d7d..2557e2c05b90 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -6,6 +6,7 @@
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -15,6 +16,7 @@
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -270,12 +272,12 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
if (db_addr) {
*db_addr = reg_addr + reg;
- dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
}
if (db_size) {
*db_size = ndev->reg->db_size;
- dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
}
return 0;
@@ -368,7 +370,8 @@ static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
if (spad_addr) {
*spad_addr = reg_addr + reg + (idx << 2);
- dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
+ *spad_addr);
}
return 0;
@@ -409,7 +412,7 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
vec_mask |= ndev->db_link_mask;
- dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
+ dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);
ndev->last_ts = jiffies;
@@ -428,7 +431,7 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
struct intel_ntb_vec *nvec = dev;
- dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n",
+ dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
irq, nvec->num);
return ndev_interrupt(nvec->ndev, nvec->num);
@@ -438,7 +441,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
struct intel_ntb_dev *ndev = dev;
- return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
+ return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
static int ndev_init_isr(struct intel_ntb_dev *ndev,
@@ -448,7 +451,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
struct pci_dev *pdev;
int rc, i, msix_count, node;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
node = dev_to_node(&pdev->dev);
@@ -487,7 +490,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
goto err_msix_request;
}
- dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
+ dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
ndev->db_vec_count = msix_count;
ndev->db_vec_shift = msix_shift;
return 0;
@@ -515,7 +518,7 @@ err_msix_vec_alloc:
if (rc)
goto err_msi_request;
- dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
+ dev_dbg(&pdev->dev, "Using msi interrupts\n");
ndev->db_vec_count = 1;
ndev->db_vec_shift = total_shift;
return 0;
@@ -533,7 +536,7 @@ err_msi_enable:
if (rc)
goto err_intx_request;
- dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
+ dev_dbg(&pdev->dev, "Using intx interrupts\n");
ndev->db_vec_count = 1;
ndev->db_vec_shift = total_shift;
return 0;
@@ -547,7 +550,7 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
struct pci_dev *pdev;
int i;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
/* Mask all doorbell interrupts */
ndev->db_mask = ndev->db_valid_mask;
@@ -744,7 +747,7 @@ static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
ndev = filp->private_data;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
@@ -1019,7 +1022,8 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
ndev->debugfs_info = NULL;
} else {
ndev->debugfs_dir =
- debugfs_create_dir(ndev_name(ndev), debugfs_dir);
+ debugfs_create_dir(pci_name(ndev->ntb.pdev),
+ debugfs_dir);
if (!ndev->debugfs_dir)
ndev->debugfs_info = NULL;
else
@@ -1035,20 +1039,26 @@ static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
debugfs_remove_recursive(ndev->debugfs_dir);
}
-static int intel_ntb_mw_count(struct ntb_dev *ntb)
+static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
return ntb_ndev(ntb)->mw_count;
}
-static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
- phys_addr_t *base,
- resource_size_t *size,
- resource_size_t *align,
- resource_size_t *align_size)
+static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ resource_size_t bar_size, mw_size;
int bar;
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
@@ -1056,24 +1066,26 @@ static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
if (bar < 0)
return bar;
- if (base)
- *base = pci_resource_start(ndev->ntb.pdev, bar) +
- (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
- if (size)
- *size = pci_resource_len(ndev->ntb.pdev, bar) -
- (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ if (addr_align)
+ *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
- if (align)
- *align = pci_resource_len(ndev->ntb.pdev, bar);
+ if (size_align)
+ *size_align = 1;
- if (align_size)
- *align_size = 1;
+ if (size_max)
+ *size_max = mw_size;
return 0;
}
-static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -1083,6 +1095,9 @@ static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
u64 base, limit, reg_val;
int bar;
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
@@ -1171,7 +1186,7 @@ static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
return 0;
}
-static int intel_ntb_link_is_up(struct ntb_dev *ntb,
+static u64 intel_ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed,
enum ntb_width *width)
{
@@ -1206,13 +1221,13 @@ static int intel_ntb_link_enable(struct ntb_dev *ntb,
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
- dev_dbg(ndev_dev(ndev),
+ dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n",
max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO)
- dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
+ dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO)
- dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
+ dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
@@ -1235,7 +1250,7 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb)
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
- dev_dbg(ndev_dev(ndev), "Disabling link\n");
+ dev_dbg(&ntb->pdev->dev, "Disabling link\n");
/* Bring NTB link down */
ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
@@ -1249,6 +1264,36 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb)
return 0;
}
+static int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ /* Numbers of inbound and outbound memory windows match */
+ return ntb_ndev(ntb)->mw_count;
+}
+
+static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ int bar;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ if (base)
+ *base = pci_resource_start(ndev->ntb.pdev, bar) +
+ (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+ if (size)
+ *size = pci_resource_len(ndev->ntb.pdev, bar) -
+ (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+ return 0;
+}
+
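The old single ntb_mw_get_range() call is thereby split in two: a client queries the inbound window restrictions and the peer outbound window address separately. A minimal client-side sketch using the signatures above (error handling omitted):

	resource_size_t addr_align, size_align, size_max, size;
	phys_addr_t base;

	ntb_mw_get_align(ntb, NTB_DEF_PEER_IDX, 0, &addr_align,
			 &size_align, &size_max);
	ntb_peer_mw_get_addr(ntb, 0, &base, &size);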
static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
@@ -1366,30 +1411,30 @@ static int intel_ntb_spad_write(struct ntb_dev *ntb,
ndev->self_reg->spad);
}
-static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
phys_addr_t *spad_addr)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
- return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
+ return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
ndev->peer_reg->spad);
}
-static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
- return ndev_spad_read(ndev, idx,
+ return ndev_spad_read(ndev, sidx,
ndev->peer_mmio +
ndev->peer_reg->spad);
}
-static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
- int idx, u32 val)
+static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+ int sidx, u32 val)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
- return ndev_spad_write(ndev, idx, val,
+ return ndev_spad_write(ndev, sidx, val,
ndev->peer_mmio +
ndev->peer_reg->spad);
}
@@ -1442,30 +1487,33 @@ static int atom_link_is_err(struct intel_ntb_dev *ndev)
static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
+ struct device *dev = &ndev->ntb.pdev->dev;
+
switch (ppd & ATOM_PPD_TOPO_MASK) {
case ATOM_PPD_TOPO_B2B_USD:
- dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
+ dev_dbg(dev, "PPD %d B2B USD\n", ppd);
return NTB_TOPO_B2B_USD;
case ATOM_PPD_TOPO_B2B_DSD:
- dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
+ dev_dbg(dev, "PPD %d B2B DSD\n", ppd);
return NTB_TOPO_B2B_DSD;
case ATOM_PPD_TOPO_PRI_USD:
case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
case ATOM_PPD_TOPO_SEC_USD:
case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
- dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
+ dev_dbg(dev, "PPD %d non B2B disabled\n", ppd);
return NTB_TOPO_NONE;
}
- dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
+ dev_dbg(dev, "PPD %d invalid\n", ppd);
return NTB_TOPO_NONE;
}
static void atom_link_hb(struct work_struct *work)
{
struct intel_ntb_dev *ndev = hb_ndev(work);
+ struct device *dev = &ndev->ntb.pdev->dev;
unsigned long poll_ts;
void __iomem *mmio;
u32 status32;
@@ -1503,30 +1551,30 @@ static void atom_link_hb(struct work_struct *work)
/* Clear AER Errors, write to clear */
status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
- dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
+ dev_dbg(dev, "ERRCORSTS = %x\n", status32);
status32 &= PCI_ERR_COR_REP_ROLL;
iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
/* Clear unexpected electrical idle event in LTSSM, write to clear */
status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
- dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
+ dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32);
status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
/* Clear DeSkew Buffer error, write to clear */
status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
- dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
+ dev_dbg(dev, "DESKEWSTS = %x\n", status32);
status32 |= ATOM_DESKEWSTS_DBERR;
iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
- dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
+ dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32);
status32 &= ATOM_IBIST_ERR_OFLOW;
iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
/* Releases the NTB state machine to allow the link to retrain */
status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
- dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
+ dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32);
status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
@@ -1699,11 +1747,11 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
int b2b_bar;
u8 bar_sz;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) {
- dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
+ dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0;
ndev->b2b_off = 0;
} else {
@@ -1711,24 +1759,21 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
if (b2b_bar < 0)
return -EIO;
- dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
+ dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
- dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
+ dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
- dev_dbg(ndev_dev(ndev),
- "b2b using first half of bar\n");
+ dev_dbg(&pdev->dev, "b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1;
} else if (bar_size >= XEON_B2B_MIN_SIZE) {
- dev_dbg(ndev_dev(ndev),
- "b2b using whole bar\n");
+ dev_dbg(&pdev->dev, "b2b using whole bar\n");
ndev->b2b_off = 0;
--ndev->mw_count;
} else {
- dev_dbg(ndev_dev(ndev),
- "b2b bar size is too small\n");
+ dev_dbg(&pdev->dev, "b2b bar size is too small\n");
return -EIO;
}
}
@@ -1738,7 +1783,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
* except disable or halve the size of the b2b secondary bar.
*/
pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
if (b2b_bar == 1) {
if (ndev->b2b_off)
bar_sz -= 1;
@@ -1748,10 +1793,10 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
if (b2b_bar == 2) {
if (ndev->b2b_off)
bar_sz -= 1;
@@ -1761,7 +1806,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
/* SBAR01 hit by first part of the b2b bar */
if (b2b_bar == 0)
@@ -1777,12 +1822,12 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
- dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
- dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
/* zero incoming translation addrs */
iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
@@ -1852,7 +1897,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev)
u8 ppd;
int rc;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
ndev->reg = &skx_reg;
@@ -1861,7 +1906,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev)
return -EIO;
ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
- dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
+ dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
@@ -1885,14 +1930,14 @@ static int intel_ntb3_link_enable(struct ntb_dev *ntb,
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
- dev_dbg(ndev_dev(ndev),
+ dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n",
max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO)
- dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
+ dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO)
- dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
+ dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
@@ -1902,7 +1947,7 @@ static int intel_ntb3_link_enable(struct ntb_dev *ntb,
return 0;
}
-static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
+static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -1912,6 +1957,9 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
u64 base, limit, reg_val;
int bar;
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
@@ -1953,7 +2001,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
return -EIO;
}
- dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
+ dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
/* set and verify setting the limit */
iowrite64(limit, mmio + limit_reg);
@@ -1964,7 +2012,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
return -EIO;
}
- dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
+ dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
/* setup the EP */
limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
@@ -1985,7 +2033,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
return -EIO;
}
- dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
+ dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
return 0;
}
@@ -2092,7 +2140,7 @@ static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
- dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
+ dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
return 1;
}
return 0;
@@ -2122,11 +2170,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
int b2b_bar;
u8 bar_sz;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) {
- dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
+ dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0;
ndev->b2b_off = 0;
} else {
@@ -2134,24 +2182,21 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
if (b2b_bar < 0)
return -EIO;
- dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
+ dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
- dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
+ dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
- dev_dbg(ndev_dev(ndev),
- "b2b using first half of bar\n");
+ dev_dbg(&pdev->dev, "b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1;
} else if (XEON_B2B_MIN_SIZE <= bar_size) {
- dev_dbg(ndev_dev(ndev),
- "b2b using whole bar\n");
+ dev_dbg(&pdev->dev, "b2b using whole bar\n");
ndev->b2b_off = 0;
--ndev->mw_count;
} else {
- dev_dbg(ndev_dev(ndev),
- "b2b bar size is too small\n");
+ dev_dbg(&pdev->dev, "b2b bar size is too small\n");
return -EIO;
}
}
@@ -2163,7 +2208,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
* offsets are not in a consistent order (bar5sz comes after ppd, odd).
*/
pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
if (b2b_bar == 2) {
if (ndev->b2b_off)
bar_sz -= 1;
@@ -2172,11 +2217,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
}
pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
if (!ndev->bar4_split) {
pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
if (b2b_bar == 4) {
if (ndev->b2b_off)
bar_sz -= 1;
@@ -2185,10 +2230,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
}
pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
} else {
pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
if (b2b_bar == 4) {
if (ndev->b2b_off)
bar_sz -= 1;
@@ -2197,10 +2242,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
}
pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
if (b2b_bar == 5) {
if (ndev->b2b_off)
bar_sz -= 1;
@@ -2209,7 +2254,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
}
pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
- dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
+ dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
}
/* SBAR01 hit by first part of the b2b bar */
@@ -2226,7 +2271,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
else
return -EIO;
- dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
@@ -2237,26 +2282,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
if (!ndev->bar4_split) {
bar_addr = addr->bar4_addr64 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
} else {
bar_addr = addr->bar4_addr32 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
bar_addr = addr->bar5_addr32 +
(b2b_bar == 5 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
}
/* setup incoming bar limits == base addrs (zero length windows) */
@@ -2264,26 +2309,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
if (!ndev->bar4_split) {
bar_addr = addr->bar4_addr64 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
} else {
bar_addr = addr->bar4_addr32 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
bar_addr = addr->bar5_addr32 +
(b2b_bar == 5 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
- dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
}
/* zero incoming translation addrs */
@@ -2309,23 +2354,23 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = peer_addr->bar2_addr64;
iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
- dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
if (!ndev->bar4_split) {
bar_addr = peer_addr->bar4_addr64;
iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
- dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
} else {
bar_addr = peer_addr->bar4_addr32;
iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
- dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
bar_addr = peer_addr->bar5_addr32;
iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
- dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
}
/* set the translation offset for b2b registers */
@@ -2343,7 +2388,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
return -EIO;
/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
- dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
+ dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
@@ -2362,6 +2407,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
+ struct device *dev = &ndev->ntb.pdev->dev;
int rc;
u32 ntb_ctl;
@@ -2377,7 +2423,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
switch (ndev->ntb.topo) {
case NTB_TOPO_PRI:
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
- dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
+ dev_err(dev, "NTB Primary config disabled\n");
return -EINVAL;
}
@@ -2395,7 +2441,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
case NTB_TOPO_SEC:
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
- dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
+ dev_err(dev, "NTB Secondary config disabled\n");
return -EINVAL;
}
/* use half the spads for the peer */
@@ -2420,18 +2466,17 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
ndev->b2b_idx = b2b_mw_idx;
if (ndev->b2b_idx >= ndev->mw_count) {
- dev_dbg(ndev_dev(ndev),
+ dev_dbg(dev,
"b2b_mw_idx %d invalid for mw_count %u\n",
b2b_mw_idx, ndev->mw_count);
return -EINVAL;
}
- dev_dbg(ndev_dev(ndev),
- "setting up b2b mw idx %d means %d\n",
+ dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
b2b_mw_idx, ndev->b2b_idx);
} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
- dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
+ dev_warn(dev, "Reduce doorbell count by 1\n");
ndev->db_count -= 1;
}
@@ -2472,7 +2517,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
u8 ppd;
int rc, mem;
- pdev = ndev_pdev(ndev);
+ pdev = ndev->ntb.pdev;
switch (pdev->device) {
/* There is a Xeon hardware errata related to writes to SDOORBELL or
@@ -2548,14 +2593,14 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
return -EIO;
ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
- dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
+ dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
if (ndev->ntb.topo != NTB_TOPO_SEC) {
ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
- dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
+ dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
ppd, ndev->bar4_split);
} else {
/* This is a way for transparent BAR to figure out if we are
@@ -2565,7 +2610,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
mem = pci_select_bars(pdev, IORESOURCE_MEM);
ndev->bar4_split = hweight32(mem) ==
HSX_SPLIT_BAR_MW_COUNT + 1;
- dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
+ dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
mem, ndev->bar4_split);
}
@@ -2602,7 +2647,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
- dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
+ dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -2610,7 +2655,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
- dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
+ dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
ndev->self_mmio = pci_iomap(pdev, 0, 0);
@@ -2636,7 +2681,7 @@ err_pci_enable:
static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
- struct pci_dev *pdev = ndev_pdev(ndev);
+ struct pci_dev *pdev = ndev->ntb.pdev;
if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
pci_iounmap(pdev, ndev->peer_mmio);
@@ -2906,8 +2951,10 @@ static const struct intel_ntb_xlat_reg skx_sec_xlat = {
/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
.mw_count = intel_ntb_mw_count,
- .mw_get_range = intel_ntb_mw_get_range,
+ .mw_get_align = intel_ntb_mw_get_align,
.mw_set_trans = intel_ntb_mw_set_trans,
+ .peer_mw_count = intel_ntb_peer_mw_count,
+ .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
.link_is_up = intel_ntb_link_is_up,
.link_enable = intel_ntb_link_enable,
.link_disable = intel_ntb_link_disable,
@@ -2932,8 +2979,10 @@ static const struct ntb_dev_ops intel_ntb_ops = {
static const struct ntb_dev_ops intel_ntb3_ops = {
.mw_count = intel_ntb_mw_count,
- .mw_get_range = intel_ntb_mw_get_range,
+ .mw_get_align = intel_ntb_mw_get_align,
.mw_set_trans = intel_ntb3_mw_set_trans,
+ .peer_mw_count = intel_ntb_peer_mw_count,
+ .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
.link_is_up = intel_ntb_link_is_up,
.link_enable = intel_ntb3_link_enable,
.link_disable = intel_ntb_link_disable,
@@ -3008,4 +3057,3 @@ static void __exit intel_ntb_pci_driver_exit(void)
debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);
-
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index f2cf8a783f1e..2d6c38afb128 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -382,9 +382,6 @@ struct intel_ntb_dev {
struct dentry *debugfs_info;
};
-#define ndev_pdev(ndev) ((ndev)->ntb.pdev)
-#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
-#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb)
#define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \
hb_timer.work)
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 2e2530743831..03b80d89b980 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,6 +19,7 @@
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -191,6 +193,73 @@ void ntb_db_event(struct ntb_dev *ntb, int vector)
}
EXPORT_SYMBOL(ntb_db_event);
+void ntb_msg_event(struct ntb_dev *ntb)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+ {
+ if (ntb->ctx_ops && ntb->ctx_ops->msg_event)
+ ntb->ctx_ops->msg_event(ntb->ctx);
+ }
+ spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_msg_event);
+
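A client interested in message notifications supplies a ->msg_event() handler in its context operations and registers it with ntb_set_ctx(); a minimal sketch with an assumed handler body:

	static void my_msg_event(void *ctx)
	{
		/* read and dispatch the inbound message registers here */
	}

	static const struct ntb_ctx_ops my_ctx_ops = {
		.msg_event = my_msg_event,
	};

	/* ... later: ntb_set_ctx(ntb, my_priv, &my_ctx_ops); ... */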
+int ntb_default_port_number(struct ntb_dev *ntb)
+{
+ switch (ntb->topo) {
+ case NTB_TOPO_PRI:
+ case NTB_TOPO_B2B_USD:
+ return NTB_PORT_PRI_USD;
+ case NTB_TOPO_SEC:
+ case NTB_TOPO_B2B_DSD:
+ return NTB_PORT_SEC_DSD;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ntb_default_port_number);
+
+int ntb_default_peer_port_count(struct ntb_dev *ntb)
+{
+ return NTB_DEF_PEER_CNT;
+}
+EXPORT_SYMBOL(ntb_default_peer_port_count);
+
+int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx)
+{
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ switch (ntb->topo) {
+ case NTB_TOPO_PRI:
+ case NTB_TOPO_B2B_USD:
+ return NTB_PORT_SEC_DSD;
+ case NTB_TOPO_SEC:
+ case NTB_TOPO_B2B_DSD:
+ return NTB_PORT_PRI_USD;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ntb_default_peer_port_number);
+
+int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port)
+{
+ int peer_port = ntb_default_peer_port_number(ntb, NTB_DEF_PEER_IDX);
+
+ if (peer_port == -EINVAL || port != peer_port)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ntb_default_peer_port_idx);
+
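Drivers for classic two-port hardware can wire these defaults straight into their operation table; a sketch of the relevant initializers, assuming the ntb_dev_ops field names mirror the helper names:

	.port_number		= ntb_default_port_number,
	.peer_port_count	= ntb_default_peer_port_count,
	.peer_port_number	= ntb_default_peer_port_number,
	.peer_port_idx		= ntb_default_peer_port_idx,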
static int ntb_probe(struct device *dev)
{
struct ntb_dev *ntb;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 10e5bf460139..9a03c5871efe 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -95,6 +95,9 @@ MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
static struct dentry *nt_debugfs_dir;
+/* Only two-port NTB devices are supported */
+#define PIDX NTB_DEF_PEER_IDX
+
struct ntb_queue_entry {
/* ntb_queue list reference */
struct list_head entry;
@@ -670,7 +673,7 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
if (!mw->virt_addr)
return;
- ntb_mw_clear_trans(nt->ndev, num_mw);
+ ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
dma_free_coherent(&pdev->dev, mw->buff_size,
mw->virt_addr, mw->dma_addr);
mw->xlat_size = 0;
@@ -727,7 +730,8 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
}
/* Notify HW the memory location of the receive buffer */
- rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
+ rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
+ mw->xlat_size);
if (rc) {
dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
ntb_free_mw(nt, num_mw);
@@ -858,17 +862,17 @@ static void ntb_transport_link_work(struct work_struct *work)
size = max_mw_size;
spad = MW0_SZ_HIGH + (i * 2);
- ntb_peer_spad_write(ndev, spad, upper_32_bits(size));
+ ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size));
spad = MW0_SZ_LOW + (i * 2);
- ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
+ ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size));
}
- ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
+ ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
- ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);
+ ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
- ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
+ ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);
/* Query the remote side for its info */
val = ntb_spad_read(ndev, VERSION);
@@ -944,7 +948,7 @@ static void ntb_qp_link_work(struct work_struct *work)
val = ntb_spad_read(nt->ndev, QP_LINKS);
- ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
+ ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
/* query remote spad for qp ready bits */
dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
@@ -1055,7 +1059,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
int node;
int rc, i;
- mw_count = ntb_mw_count(ndev);
+ mw_count = ntb_mw_count(ndev, PIDX);
+
+ if (!ndev->ops->mw_set_trans) {
+ dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
+ return -EINVAL;
+ }
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
@@ -1064,6 +1073,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
dev_dbg(&ndev->dev,
"scratchpad is unsafe, proceed anyway...\n");
+ if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT)
+ dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n");
+
node = dev_to_node(&ndev->dev);
nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
@@ -1094,8 +1106,13 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i];
- rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
- &mw->xlat_align, &mw->xlat_align_size);
+ rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
+ &mw->xlat_align_size, NULL);
+ if (rc)
+ goto err1;
+
+ rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
+ &mw->phys_size);
if (rc)
goto err1;
@@ -2091,8 +2108,7 @@ void ntb_transport_link_down(struct ntb_transport_qp *qp)
val = ntb_spad_read(qp->ndev, QP_LINKS);
- ntb_peer_spad_write(qp->ndev, QP_LINKS,
- val & ~BIT(qp->qp_num));
+ ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
if (qp->link_is_up)
ntb_send_link_down(qp);
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 5cab2831ce99..759f772fa00c 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -76,6 +76,7 @@
#define DMA_RETRIES 20
#define SZ_4G (1ULL << 32)
#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */
+#define PIDX NTB_DEF_PEER_IDX
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_VERSION(DRIVER_VERSION);
@@ -100,6 +101,10 @@ static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Using DMA engine to measure performance");
+static bool on_node = true; /* default to 1 */
+module_param(on_node, bool, 0644);
+MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)");
+
struct perf_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
@@ -135,9 +140,6 @@ struct perf_ctx {
bool link_is_up;
struct delayed_work link_work;
wait_queue_head_t link_wq;
- struct dentry *debugfs_node_dir;
- struct dentry *debugfs_run;
- struct dentry *debugfs_threads;
u8 perf_threads;
/* mutex ensures only one set of threads run at once */
struct mutex run_mutex;
@@ -344,6 +346,10 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
+ /* Is the channel required to be on the same node as the device? */
+ if (!on_node)
+ return true;
+
return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
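The filter is handed to dma_request_channel() with the NUMA node packed into its opaque parameter, presumably along these lines:

	dma_cap_mask_t dma_mask;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);
	pctx->dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					     (void *)(unsigned long)node);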
@@ -361,7 +367,7 @@ static int ntb_perf_thread(void *data)
pr_debug("kthread %s starting...\n", current->comm);
- node = dev_to_node(&pdev->dev);
+ node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE;
if (use_dma && !pctx->dma_chan) {
dma_cap_mask_t dma_mask;
@@ -454,7 +460,7 @@ static void perf_free_mw(struct perf_ctx *perf)
if (!mw->virt_addr)
return;
- ntb_mw_clear_trans(perf->ntb, 0);
+ ntb_mw_clear_trans(perf->ntb, PIDX, 0);
dma_free_coherent(&pdev->dev, mw->buf_size,
mw->virt_addr, mw->dma_addr);
mw->xlat_size = 0;
@@ -490,7 +496,7 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
mw->buf_size = 0;
}
- rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
+ rc = ntb_mw_set_trans(perf->ntb, PIDX, 0, mw->dma_addr, mw->xlat_size);
if (rc) {
dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
perf_free_mw(perf);
@@ -517,9 +523,9 @@ static void perf_link_work(struct work_struct *work)
if (max_mw_size && size > max_mw_size)
size = max_mw_size;
- ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
- ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
- ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);
+ ntb_peer_spad_write(ndev, PIDX, MW_SZ_HIGH, upper_32_bits(size));
+ ntb_peer_spad_write(ndev, PIDX, MW_SZ_LOW, lower_32_bits(size));
+ ntb_peer_spad_write(ndev, PIDX, VERSION, PERF_VERSION);
/* now read what peer wrote */
val = ntb_spad_read(ndev, VERSION);
@@ -561,8 +567,12 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
mw = &perf->mw;
- rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size,
- &mw->xlat_align, &mw->xlat_align_size);
+ rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
+ &mw->xlat_align_size, NULL);
+ if (rc)
+ return rc;
+
+ rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
if (rc)
return rc;
@@ -677,7 +687,8 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
pr_info("Fix run_order to %u\n", run_order);
}
- node = dev_to_node(&perf->ntb->pdev->dev);
+ node = on_node ? dev_to_node(&perf->ntb->pdev->dev)
+ : NUMA_NO_NODE;
atomic_set(&perf->tdone, 0);
/* launch kernel thread */
@@ -723,34 +734,71 @@ static const struct file_operations ntb_perf_debugfs_run = {
static int perf_debugfs_setup(struct perf_ctx *perf)
{
struct pci_dev *pdev = perf->ntb->pdev;
+ struct dentry *debugfs_node_dir;
+ struct dentry *debugfs_run;
+ struct dentry *debugfs_threads;
+ struct dentry *debugfs_seg_order;
+ struct dentry *debugfs_run_order;
+ struct dentry *debugfs_use_dma;
+ struct dentry *debugfs_on_node;
if (!debugfs_initialized())
return -ENODEV;
+ /* Assumption: only one NTB device in the system */
if (!perf_debugfs_dir) {
perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!perf_debugfs_dir)
return -ENODEV;
}
- perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
- perf_debugfs_dir);
- if (!perf->debugfs_node_dir)
- return -ENODEV;
+ debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
+ perf_debugfs_dir);
+ if (!debugfs_node_dir)
+ goto err;
- perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
- perf->debugfs_node_dir, perf,
- &ntb_perf_debugfs_run);
- if (!perf->debugfs_run)
- return -ENODEV;
+ debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
+ debugfs_node_dir, perf,
+ &ntb_perf_debugfs_run);
+ if (!debugfs_run)
+ goto err;
- perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
- perf->debugfs_node_dir,
- &perf->perf_threads);
- if (!perf->debugfs_threads)
- return -ENODEV;
+ debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
+ debugfs_node_dir,
+ &perf->perf_threads);
+ if (!debugfs_threads)
+ goto err;
+
+ debugfs_seg_order = debugfs_create_u32("seg_order", 0600,
+ debugfs_node_dir,
+ &seg_order);
+ if (!debugfs_seg_order)
+ goto err;
+
+ debugfs_run_order = debugfs_create_u32("run_order", 0600,
+ debugfs_node_dir,
+ &run_order);
+ if (!debugfs_run_order)
+ goto err;
+
+ debugfs_use_dma = debugfs_create_bool("use_dma", 0600,
+ debugfs_node_dir,
+ &use_dma);
+ if (!debugfs_use_dma)
+ goto err;
+
+ debugfs_on_node = debugfs_create_bool("on_node", 0600,
+ debugfs_node_dir,
+ &on_node);
+ if (!debugfs_on_node)
+ goto err;
return 0;
+
+err:
+ debugfs_remove_recursive(perf_debugfs_dir);
+ perf_debugfs_dir = NULL;
+ return -ENODEV;
}
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
@@ -766,8 +814,15 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
return -EIO;
}
- node = dev_to_node(&pdev->dev);
+ if (!ntb->ops->mw_set_trans) {
+ dev_err(&ntb->dev, "Need inbound MW based NTB API\n");
+ return -EINVAL;
+ }
+
+ if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
+ dev_warn(&ntb->dev, "Multi-port NTB devices unsupported\n");
+ node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE;
perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
if (!perf) {
rc = -ENOMEM;
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 435861189d97..938a18bcfc3f 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -90,6 +90,9 @@ static unsigned long db_init = 0x7;
module_param(db_init, ulong, 0644);
MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
+/* Only two-port NTB devices are supported */
+#define PIDX NTB_DEF_PEER_IDX
+
struct pp_ctx {
struct ntb_dev *ntb;
u64 db_bits;
@@ -135,7 +138,7 @@ static void pp_ping(unsigned long ctx)
"Ping bits %#llx read %#x write %#x\n",
db_bits, spad_rd, spad_wr);
- ntb_peer_spad_write(pp->ntb, 0, spad_wr);
+ ntb_peer_spad_write(pp->ntb, PIDX, 0, spad_wr);
ntb_peer_db_set(pp->ntb, db_bits);
ntb_db_clear_mask(pp->ntb, db_mask);
@@ -222,6 +225,12 @@ static int pp_probe(struct ntb_client *client,
}
}
+ if (ntb_spad_count(ntb) < 1) {
+ dev_dbg(&ntb->dev, "no enough scratchpads\n");
+ rc = -EINVAL;
+ goto err_pp;
+ }
+
if (ntb_spad_is_unsafe(ntb)) {
dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
if (!unsafe) {
@@ -230,6 +239,9 @@ static int pp_probe(struct ntb_client *client,
}
}
+ if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
+ dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");
+
pp = kmalloc(sizeof(*pp), GFP_KERNEL);
if (!pp) {
rc = -ENOMEM;
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 61bf2ef87e0e..f002bf48a08d 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -119,7 +119,10 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
-#define MAX_MWS 16
+/* It is rare to have hardware with more than six MWs */
+#define MAX_MWS 6
+/* Only two-port devices are supported */
+#define PIDX NTB_DEF_PEER_IDX
static struct dentry *tool_dbgfs;
@@ -459,13 +462,22 @@ static TOOL_FOPS_RDWR(tool_spad_fops,
tool_spad_read,
tool_spad_write);
+static u32 ntb_tool_peer_spad_read(struct ntb_dev *ntb, int sidx)
+{
+ return ntb_peer_spad_read(ntb, PIDX, sidx);
+}
+
static ssize_t tool_peer_spad_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
- return tool_spadfn_read(tc, ubuf, size, offp,
- tc->ntb->ops->peer_spad_read);
+ return tool_spadfn_read(tc, ubuf, size, offp, ntb_tool_peer_spad_read);
+}
+
+static int ntb_tool_peer_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
+{
+ return ntb_peer_spad_write(ntb, PIDX, sidx, val);
}
static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf,
@@ -474,7 +486,7 @@ static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf,
struct tool_ctx *tc = filep->private_data;
return tool_spadfn_write(tc, ubuf, size, offp,
- tc->ntb->ops->peer_spad_write);
+ ntb_tool_peer_spad_write);
}
static TOOL_FOPS_RDWR(tool_peer_spad_fops,
@@ -668,28 +680,27 @@ static int tool_setup_mw(struct tool_ctx *tc, int idx, size_t req_size)
{
int rc;
struct tool_mw *mw = &tc->mws[idx];
- phys_addr_t base;
- resource_size_t size, align, align_size;
+ resource_size_t size, align_addr, align_size;
char buf[16];
if (mw->peer)
return 0;
- rc = ntb_mw_get_range(tc->ntb, idx, &base, &size, &align,
- &align_size);
+ rc = ntb_mw_get_align(tc->ntb, PIDX, idx, &align_addr,
+ &align_size, &size);
if (rc)
return rc;
mw->size = min_t(resource_size_t, req_size, size);
- mw->size = round_up(mw->size, align);
+ mw->size = round_up(mw->size, align_addr);
mw->size = round_up(mw->size, align_size);
mw->peer = dma_alloc_coherent(&tc->ntb->pdev->dev, mw->size,
&mw->peer_dma, GFP_KERNEL);
- if (!mw->peer)
+ if (!mw->peer || !IS_ALIGNED(mw->peer_dma, align_addr))
return -ENOMEM;
- rc = ntb_mw_set_trans(tc->ntb, idx, mw->peer_dma, mw->size);
+ rc = ntb_mw_set_trans(tc->ntb, PIDX, idx, mw->peer_dma, mw->size);
if (rc)
goto err_free_dma;
@@ -716,7 +727,7 @@ static void tool_free_mw(struct tool_ctx *tc, int idx)
struct tool_mw *mw = &tc->mws[idx];
if (mw->peer) {
- ntb_mw_clear_trans(tc->ntb, idx);
+ ntb_mw_clear_trans(tc->ntb, PIDX, idx);
dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
mw->peer,
mw->peer_dma);
@@ -742,8 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
phys_addr_t base;
resource_size_t mw_size;
- resource_size_t align;
+ resource_size_t align_addr;
resource_size_t align_size;
+ resource_size_t max_size;
buf_size = min_t(size_t, size, 512);
@@ -751,8 +763,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
if (!buf)
return -ENOMEM;
- ntb_mw_get_range(mw->tc->ntb, mw->idx,
- &base, &mw_size, &align, &align_size);
+ ntb_mw_get_align(mw->tc->ntb, PIDX, mw->idx,
+ &align_addr, &align_size, &max_size);
+ ntb_peer_mw_get_addr(mw->tc->ntb, mw->idx, &base, &mw_size);
off += scnprintf(buf + off, buf_size - off,
"Peer MW %d Information:\n", mw->idx);
@@ -767,13 +780,17 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
off += scnprintf(buf + off, buf_size - off,
"Alignment \t%lld\n",
- (unsigned long long)align);
+ (unsigned long long)align_addr);
off += scnprintf(buf + off, buf_size - off,
"Size Alignment \t%lld\n",
(unsigned long long)align_size);
off += scnprintf(buf + off, buf_size - off,
+ "Size Max \t%lld\n",
+ (unsigned long long)max_size);
+
+ off += scnprintf(buf + off, buf_size - off,
"Ready \t%c\n",
(mw->peer) ? 'Y' : 'N');
@@ -827,8 +844,7 @@ static int tool_init_mw(struct tool_ctx *tc, int idx)
phys_addr_t base;
int rc;
- rc = ntb_mw_get_range(tc->ntb, idx, &base, &mw->win_size,
- NULL, NULL);
+ rc = ntb_peer_mw_get_addr(tc->ntb, idx, &base, &mw->win_size);
if (rc)
return rc;
@@ -913,12 +929,27 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
int rc;
int i;
+ if (!ntb->ops->mw_set_trans) {
+ dev_dbg(&ntb->dev, "need inbound MW based NTB API\n");
+ rc = -EINVAL;
+ goto err_tc;
+ }
+
+ if (ntb_spad_count(ntb) < 1) {
+ dev_dbg(&ntb->dev, "no enough scratchpads\n");
+ rc = -EINVAL;
+ goto err_tc;
+ }
+
if (ntb_db_is_unsafe(ntb))
dev_dbg(&ntb->dev, "doorbell is unsafe\n");
if (ntb_spad_is_unsafe(ntb))
dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+ if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
+ dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");
+
tc = kzalloc(sizeof(*tc), GFP_KERNEL);
if (!tc) {
rc = -ENOMEM;
@@ -928,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
tc->ntb = ntb;
init_waitqueue_head(&tc->link_wq);
- tc->mw_count = min(ntb_mw_count(tc->ntb), MAX_MWS);
+ tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS);
for (i = 0; i < tc->mw_count; i++) {
rc = tool_init_mw(tc, i);
if (rc)
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7cd99b1f8596..75bc08c6838c 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
const unsigned int sector_size = 512;
- sector_t start_sector;
+ sector_t start_sector, end_sector;
u64 num_sectors;
u32 rem;
start_sector = div_u64(ns_offset, sector_size);
- num_sectors = div_u64_rem(len, sector_size, &rem);
+ end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
if (rem)
- num_sectors++;
+ end_sector++;
+ num_sectors = end_sector - start_sector;
if (unlikely(num_sectors > (u64)INT_MAX)) {
u64 remaining = num_sectors;
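
The badblock fix matters when ns_offset is not sector-aligned: rounding only len up to sectors can miss the last sector the range actually touches, whereas deriving an exclusive end sector from ns_offset + len cannot. A worked example with the 512-byte sectors used above:

    /* ns_offset = 256, len = 512: bytes 256..767, i.e. sectors 0 and 1.
     *
     * old: num_sectors = round_up(512 / 512)      = 1  (sector 1 missed)
     * new: start_sector = 256 / 512               = 0
     *      end_sector   = round_up(768 / 512)     = 2
     *      num_sectors  = end_sector - start_sector = 2
     */
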
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 5acf8694fb23..7bb9870f6d8c 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -1483,7 +1483,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
bridge->swizzle_irq = pci_common_swizzle;
err = pci_scan_root_bus_bridge(bridge);
- if (!err)
+ if (err < 0)
goto err_free_res;
bus = bridge->bus;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 607f677f48d2..d51e8738f9c2 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -511,6 +511,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
}
pci_restore_state(pci_dev);
+ pci_pme_restore(pci_dev);
return 0;
}
@@ -522,6 +523,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_power_up(pci_dev);
pci_restore_state(pci_dev);
+ pci_pme_restore(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d88edf5c563b..af0cc3456dc1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1801,7 +1801,11 @@ static void __pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}
-static void pci_pme_restore(struct pci_dev *dev)
+/**
+ * pci_pme_restore - Restore PME configuration after config space restore.
+ * @dev: PCI device to update.
+ */
+void pci_pme_restore(struct pci_dev *dev)
{
u16 pmcsr;
@@ -1811,6 +1815,7 @@ static void pci_pme_restore(struct pci_dev *dev)
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
if (dev->wakeup_prepared) {
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+ pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
} else {
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pmcsr |= PCI_PM_CTRL_PME_STATUS;
@@ -1907,14 +1912,9 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int ret = 0;
- /*
- * Don't do the same thing twice in a row for one device, but restore
- * PME Enable in case it has been updated by config space restoration.
- */
- if (!!enable == !!dev->wakeup_prepared) {
- pci_pme_restore(dev);
+ /* Don't do the same thing twice in a row for one device. */
+ if (!!enable == !!dev->wakeup_prepared)
return 0;
- }
/*
* According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 03e3d0285aea..22e061738c6f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -71,6 +71,7 @@ void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_keep_suspended(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 80e58d25006d..fafdb165dd2e 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -40,17 +40,11 @@ static int __init pcie_pme_setup(char *str)
}
__setup("pcie_pme=", pcie_pme_setup);
-enum pme_suspend_level {
- PME_SUSPEND_NONE = 0,
- PME_SUSPEND_WAKEUP,
- PME_SUSPEND_NOIRQ,
-};
-
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
struct work_struct work;
- enum pme_suspend_level suspend_level;
+ bool noirq; /* If set, keep the PME interrupt disabled. */
};
/**
@@ -228,7 +222,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
spin_lock_irq(&data->lock);
for (;;) {
- if (data->suspend_level != PME_SUSPEND_NONE)
+ if (data->noirq)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
@@ -255,7 +249,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
spin_lock_irq(&data->lock);
}
- if (data->suspend_level == PME_SUSPEND_NONE)
+ if (!data->noirq)
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
@@ -378,7 +372,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
- bool wakeup, wake_irq_enabled = false;
+ bool wakeup;
int ret;
if (device_may_wakeup(&port->dev)) {
@@ -388,19 +382,16 @@ static int pcie_pme_suspend(struct pcie_device *srv)
wakeup = pcie_pme_check_wakeup(port->subordinate);
up_read(&pci_bus_sem);
}
- spin_lock_irq(&data->lock);
if (wakeup) {
ret = enable_irq_wake(srv->irq);
- if (ret == 0) {
- data->suspend_level = PME_SUSPEND_WAKEUP;
- wake_irq_enabled = true;
- }
- }
- if (!wake_irq_enabled) {
- pcie_pme_interrupt_enable(port, false);
- pcie_clear_root_pme_status(port);
- data->suspend_level = PME_SUSPEND_NOIRQ;
+ if (!ret)
+ return 0;
}
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(port, false);
+ pcie_clear_root_pme_status(port);
+ data->noirq = true;
spin_unlock_irq(&data->lock);
synchronize_irq(srv->irq);
@@ -417,15 +408,15 @@ static int pcie_pme_resume(struct pcie_device *srv)
struct pcie_pme_service_data *data = get_service_data(srv);
spin_lock_irq(&data->lock);
- if (data->suspend_level == PME_SUSPEND_NOIRQ) {
+ if (data->noirq) {
struct pci_dev *port = srv->port;
pcie_clear_root_pme_status(port);
pcie_pme_interrupt_enable(port, true);
+ data->noirq = false;
} else {
disable_irq_wake(srv->irq);
}
- data->suspend_level = PME_SUSPEND_NONE;
spin_unlock_irq(&data->lock);
return 0;
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 4300a558d0f3..322de58eebaf 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -17,17 +17,27 @@
*/
int loongson3_cpu_temp(int cpu)
{
- u32 reg;
+ u32 reg, prid_rev;
reg = LOONGSON_CHIPTEMP(cpu);
- if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1)
+ prid_rev = read_c0_prid() & PRID_REV_MASK;
+ switch (prid_rev) {
+ case PRID_REV_LOONGSON3A_R1:
reg = (reg >> 8) & 0xff;
- else
+ break;
+ case PRID_REV_LOONGSON3A_R2:
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
reg = ((reg >> 8) & 0xff) - 100;
-
+ break;
+ case PRID_REV_LOONGSON3A_R3:
+ reg = (reg & 0xffff)*731/0x4000 - 273;
+ break;
+ }
return (int)reg * 1000;
}
+static int nr_packages;
static struct device *cpu_hwmon_dev;
static ssize_t get_hwmon_name(struct device *dev,
@@ -51,88 +61,74 @@ static ssize_t get_hwmon_name(struct device *dev,
return sprintf(buf, "cpu-hwmon\n");
}
-static ssize_t get_cpu0_temp(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t get_cpu1_temp(struct device *dev,
+static ssize_t get_cpu_temp(struct device *dev,
struct device_attribute *attr, char *buf);
-static ssize_t cpu0_temp_label(struct device *dev,
+static ssize_t cpu_temp_label(struct device *dev,
struct device_attribute *attr, char *buf);
-static ssize_t cpu1_temp_label(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_cpu0_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, cpu0_temp_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_cpu1_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, cpu1_temp_label, NULL, 2);
-static const struct attribute *hwmon_cputemp1[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- NULL
-};
-
-static const struct attribute *hwmon_cputemp2[] = {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, cpu_temp_label, NULL, 4);
+
+static const struct attribute *hwmon_cputemp[4][3] = {
+ {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ NULL
+ },
+ {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ NULL
+ },
+ {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ NULL
+ },
+ {
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ NULL
+ }
};
-static ssize_t cpu0_temp_label(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "CPU 0 Temperature\n");
-}
-
-static ssize_t cpu1_temp_label(struct device *dev,
+static ssize_t cpu_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "CPU 1 Temperature\n");
+ int id = (to_sensor_dev_attr(attr))->index - 1;
+
+ return sprintf(buf, "CPU %d Temperature\n", id);
}
-static ssize_t get_cpu0_temp(struct device *dev,
+static ssize_t get_cpu_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int value = loongson3_cpu_temp(0);
- return sprintf(buf, "%d\n", value);
-}
-
-static ssize_t get_cpu1_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int value = loongson3_cpu_temp(1);
+ int id = (to_sensor_dev_attr(attr))->index - 1;
+ int value = loongson3_cpu_temp(id);
return sprintf(buf, "%d\n", value);
}
static int create_sysfs_cputemp_files(struct kobject *kobj)
{
- int ret;
-
- ret = sysfs_create_files(kobj, hwmon_cputemp1);
- if (ret)
- goto sysfs_create_temp1_fail;
-
- if (loongson_sysconf.nr_cpus <= loongson_sysconf.cores_per_package)
- return 0;
+ int i, ret = 0;
- ret = sysfs_create_files(kobj, hwmon_cputemp2);
- if (ret)
- goto sysfs_create_temp2_fail;
+ for (i=0; i<nr_packages; i++)
+ ret = sysfs_create_files(kobj, hwmon_cputemp[i]);
- return 0;
-
-sysfs_create_temp2_fail:
- sysfs_remove_files(kobj, hwmon_cputemp1);
-
-sysfs_create_temp1_fail:
- return -1;
+ return ret;
}
static void remove_sysfs_cputemp_files(struct kobject *kobj)
{
- sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp1);
+ int i;
- if (loongson_sysconf.nr_cpus > loongson_sysconf.cores_per_package)
- sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp2);
+ for (i=0; i<nr_packages; i++)
+ sysfs_remove_files(kobj, hwmon_cputemp[i]);
}
#define CPU_THERMAL_THRESHOLD 90000
@@ -140,8 +136,15 @@ static struct delayed_work thermal_work;
static void do_thermal_timer(struct work_struct *work)
{
- int value = loongson3_cpu_temp(0);
- if (value <= CPU_THERMAL_THRESHOLD)
+ int i, value, temp_max = 0;
+
+ for (i=0; i<nr_packages; i++) {
+ value = loongson3_cpu_temp(i);
+ if (value > temp_max)
+ temp_max = value;
+ }
+
+ if (temp_max <= CPU_THERMAL_THRESHOLD)
schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000));
else
orderly_poweroff(true);
@@ -160,6 +163,9 @@ static int __init loongson_hwmon_init(void)
goto fail_hwmon_device_register;
}
+ nr_packages = loongson_sysconf.nr_cpus /
+ loongson_sysconf.cores_per_package;
+
ret = sysfs_create_group(&cpu_hwmon_dev->kobj,
&cpu_hwmon_attribute_group);
if (ret) {
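
For the Loongson 3A R3 case the raw 16-bit sensor value is scaled by 731/0x4000 and offset by 273, i.e. a Kelvin-like reading converted to Celsius. Worked through for an assumed raw value of 0x2000:

    /* raw = 0x2000 (8192):
     *   8192 * 731 / 16384 = 365     (integer division)
     *   365 - 273          = 92 degC -> 92000 after the * 1000 scaling
     */
    int temp_mc = ((0x2000 & 0xffff) * 731 / 0x4000 - 273) * 1000;
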
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 9866fec78c1c..0831b428c217 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -604,7 +604,7 @@ static struct attribute *hdmi_attrs[] = {
NULL,
};
-static struct attribute_group hdmi_attribute_group = {
+static const struct attribute_group hdmi_attribute_group = {
.name = "hdmi",
.attrs = hdmi_attrs,
};
@@ -660,7 +660,7 @@ static struct attribute *amplifier_attrs[] = {
NULL,
};
-static struct attribute_group amplifier_attribute_group = {
+static const struct attribute_group amplifier_attribute_group = {
.name = "amplifier",
.attrs = amplifier_attrs,
};
@@ -741,7 +741,7 @@ static struct attribute *deepsleep_attrs[] = {
NULL,
};
-static struct attribute_group deepsleep_attribute_group = {
+static const struct attribute_group deepsleep_attribute_group = {
.name = "deepsleep",
.attrs = deepsleep_attrs,
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 6c7d86074b38..709e3a67391a 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1433,7 +1433,7 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
return ok ? attr->mode : 0;
}
-static struct attribute_group hwmon_attribute_group = {
+static const struct attribute_group hwmon_attribute_group = {
.is_visible = asus_hwmon_sysfs_is_visible,
.attrs = hwmon_attributes
};
@@ -1821,7 +1821,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
return ok ? attr->mode : 0;
}
-static struct attribute_group platform_attribute_group = {
+static const struct attribute_group platform_attribute_group = {
.is_visible = asus_sysfs_is_visible,
.attrs = platform_attributes
};
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index e1c2b6d4b24a..a8e4a539e704 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -718,7 +718,7 @@ static struct attribute *compal_platform_attrs[] = {
&dev_attr_wake_up_mouse.attr,
NULL
};
-static struct attribute_group compal_platform_attr_group = {
+static const struct attribute_group compal_platform_attr_group = {
.attrs = compal_platform_attrs
};
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index c1a852847d02..85de30f93a9c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -317,7 +317,7 @@ static struct attribute *fujitsu_pf_attributes[] = {
NULL
};
-static struct attribute_group fujitsu_pf_attribute_group = {
+static const struct attribute_group fujitsu_pf_attribute_group = {
.attrs = fujitsu_pf_attributes
};
@@ -695,6 +695,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
if (call_fext_func(device,
FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
led->name = "fujitsu::logolamp";
led->brightness_set_blocking = logolamp_set;
led->brightness_get = logolamp_get;
@@ -707,6 +710,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
(call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
led->name = "fujitsu::kblamps";
led->brightness_set_blocking = kblamps_set;
led->brightness_get = kblamps_get;
@@ -723,6 +729,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
*/
if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
led->name = "fujitsu::radio_led";
led->brightness_set_blocking = radio_led_set;
led->brightness_get = radio_led_get;
@@ -741,6 +750,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
(call_fext_func(device,
FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
led->name = "fujitsu::eco_led";
led->brightness_set_blocking = eco_led_set;
led->brightness_get = eco_led_get;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 527e5d9ab9bf..603fc6050971 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -909,17 +909,94 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo V310-14IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14IKB"),
+ },
+ },
+ {
+ .ident = "Lenovo V310-14ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo V310-15IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15IKB"),
+ },
+ },
+ {
.ident = "Lenovo V310-15ISK",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo V510-15IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V510-15IKB"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 300-15IBR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IBR"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 300-15IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IKB"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 300S-11IBR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300S-11BR"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 310-15ABR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ABR"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 310-15IAP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IAP"),
},
},
{
.ident = "Lenovo ideapad 310-15IKB",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 310-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad Y700-14ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-14ISK"),
},
},
{
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index 4cc2f4ea0a25..cd21df982abd 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -710,6 +710,24 @@ static const struct file_operations telem_socstate_ops = {
.release = single_release,
};
+static int telem_s0ix_res_get(void *data, u64 *val)
+{
+ u64 s0ix_total_res;
+ int ret;
+
+ ret = intel_pmc_s0ix_counter_read(&s0ix_total_res);
+ if (ret) {
+ pr_err("Failed to read S0ix residency");
+ return ret;
+ }
+
+ *val = s0ix_total_res;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(telem_s0ix_fops, telem_s0ix_res_get, NULL, "%llu\n");
+
static int telem_pss_trc_verb_show(struct seq_file *s, void *unused)
{
u32 verbosity;
@@ -938,7 +956,7 @@ static struct notifier_block pm_notifier = {
static int __init telemetry_debugfs_init(void)
{
const struct x86_cpu_id *id;
- int err = -ENOMEM;
+ int err;
struct dentry *f;
/* Only APL supported for now */
@@ -958,11 +976,10 @@ static int __init telemetry_debugfs_init(void)
register_pm_notifier(&pm_notifier);
+ err = -ENOMEM;
debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
- if (!debugfs_conf->telemetry_dbg_dir) {
- err = -ENOMEM;
+ if (!debugfs_conf->telemetry_dbg_dir)
goto out_pm;
- }
f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir, NULL,
@@ -988,6 +1005,14 @@ static int __init telemetry_debugfs_init(void)
goto out;
}
+ f = debugfs_create_file("s0ix_residency_usec", S_IFREG | S_IRUGO,
+ debugfs_conf->telemetry_dbg_dir,
+ NULL, &telem_s0ix_fops);
+ if (!f) {
+ pr_err("s0ix_residency_usec debugfs register failed\n");
+ goto out;
+ }
+
f = debugfs_create_file("pss_trace_verbosity", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir, NULL,
&telem_pss_trc_verb_ops);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 76b0a58e205b..5c39b3211709 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -437,7 +437,7 @@ static struct attribute *pcc_sysfs_entries[] = {
NULL,
};
-static struct attribute_group pcc_attr_group = {
+static const struct attribute_group pcc_attr_group = {
.name = NULL, /* put in device directory */
.attrs = pcc_sysfs_entries,
};
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
index ca75b4dc437e..77d1f90b0794 100644
--- a/drivers/platform/x86/peaq-wmi.c
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -51,7 +51,7 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
return;
}
- if (peaq_ignore_events_counter && --peaq_ignore_events_counter > 0)
+ if (peaq_ignore_events_counter && --peaq_ignore_events_counter >= 0)
return;
if (obj.integer.value) {
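
The peaq-wmi comparison is an off-by-one: with a pre-decrement, a counter of 1 should swallow exactly one event. Traced through:

    /* peaq_ignore_events_counter == 1 (one event left to ignore):
     *   --peaq_ignore_events_counter -> 0
     *   old test: 0 > 0   -> false -> event wrongly processed
     *   new test: 0 >= 0  -> true  -> event ignored, as intended
     */
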
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 5c4dfe48f03d..0c703feaeb88 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1232,7 +1232,7 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
return ok ? attr->mode : 0;
}
-static struct attribute_group platform_attribute_group = {
+static const struct attribute_group platform_attribute_group = {
.is_visible = samsung_sysfs_is_visible,
.attrs = platform_attributes
};
diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c
index 3cd3bdfe51df..1157a7b646d6 100644
--- a/drivers/platform/x86/silead_dmi.c
+++ b/drivers/platform/x86/silead_dmi.c
@@ -122,6 +122,20 @@ static const struct silead_ts_dmi_data pov_mobii_wintab_p800w_data = {
.properties = pov_mobii_wintab_p800w_props,
};
+static const struct property_entry itworks_tw891_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 890),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
+ { }
+};
+
+static const struct silead_ts_dmi_data itworks_tw891_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = itworks_tw891_props,
+};
+
static const struct dmi_system_id silead_ts_dmi_table[] = {
{
/* CUBE iwork8 Air */
@@ -160,6 +174,16 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
},
},
{
+ /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
+ .driver_data = (void *)&surftab_wintron70_st70416_6_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Shenzhen PLOYER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MOMO7W"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
+ },
+ },
+ {
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
@@ -187,6 +211,14 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
},
},
+ {
+ /* I.T.Works TW891 */
+ .driver_data = (void *)&itworks_tw891_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW891"),
+ },
+ },
{ },
};
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 88f9f79a7cf6..bb1dcd7fbdeb 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2419,7 +2419,7 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
return exists ? attr->mode : 0;
}
-static struct attribute_group toshiba_attr_group = {
+static const struct attribute_group toshiba_attr_group = {
.is_visible = toshiba_sysfs_is_visible,
.attrs = toshiba_attributes,
};
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 3de802f169a1..9dff1b4b85fc 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -980,10 +980,37 @@ static int twl4030_bci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bci);
+ INIT_WORK(&bci->work, twl4030_bci_usb_work);
+ INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker);
+
bci->channel_vac = devm_iio_channel_get(&pdev->dev, "vac");
if (IS_ERR(bci->channel_vac)) {
+ ret = PTR_ERR(bci->channel_vac);
+ if (ret == -EPROBE_DEFER)
+ return ret; /* iio not ready */
+ dev_warn(&pdev->dev, "could not request vac iio channel (%d)",
+ ret);
bci->channel_vac = NULL;
- dev_warn(&pdev->dev, "could not request vac iio channel");
+ }
+
+ if (bci->dev->of_node) {
+ struct device_node *phynode;
+
+ phynode = of_find_compatible_node(bci->dev->of_node->parent,
+ NULL, "ti,twl4030-usb");
+ if (phynode) {
+ bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
+ bci->transceiver = devm_usb_get_phy_by_node(
+ bci->dev, phynode, &bci->usb_nb);
+ if (IS_ERR(bci->transceiver)) {
+ ret = PTR_ERR(bci->transceiver);
+ if (ret == -EPROBE_DEFER)
+ return ret; /* phy not ready */
+ dev_warn(&pdev->dev, "could not request transceiver (%d)",
+ ret);
+ bci->transceiver = NULL;
+ }
+ }
}
bci->ac = devm_power_supply_register(&pdev->dev, &twl4030_bci_ac_desc,
@@ -1019,20 +1046,6 @@ static int twl4030_bci_probe(struct platform_device *pdev)
return ret;
}
- INIT_WORK(&bci->work, twl4030_bci_usb_work);
- INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker);
-
- bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
- if (bci->dev->of_node) {
- struct device_node *phynode;
-
- phynode = of_find_compatible_node(bci->dev->of_node->parent,
- NULL, "ti,twl4030-usb");
- if (phynode)
- bci->transceiver = devm_usb_get_phy_by_node(
- bci->dev, phynode, &bci->usb_nb);
- }
-
/* Enable interrupts now. */
reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
TWL4030_TBATOR1 | TWL4030_BATSTS);
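
Pulling the IIO and PHY lookups ahead of power-supply registration lets twl4030_bci_probe() bail out with -EPROBE_DEFER before anything user-visible is registered; only other errors degrade to running without the optional resource. The defer-aware pattern, roughly:

    /* Sketch of the optional-resource lookup used above. */
    struct iio_channel *chan;
    int ret;

    chan = devm_iio_channel_get(dev, "vac");
    if (IS_ERR(chan)) {
            ret = PTR_ERR(chan);
            if (ret == -EPROBE_DEFER)
                    return ret;    /* provider not ready, retry probe later */
            dev_warn(dev, "could not request vac iio channel (%d)", ret);
            chan = NULL;           /* optional channel: carry on without it */
    }
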
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a0860b30bd93..1581f6ab1b1f 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -678,7 +678,9 @@ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
pc = of_node_to_pwmchip(args.np);
if (IS_ERR(pc)) {
- pr_err("%s(): PWM chip not found\n", __func__);
+ if (PTR_ERR(pc) != -EPROBE_DEFER)
+ pr_err("%s(): PWM chip not found\n", __func__);
+
pwm = ERR_CAST(pc);
goto put;
}
diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c
index d2ed0a2a18e8..a9a88137f2cb 100644
--- a/drivers/pwm/pwm-bfin.c
+++ b/drivers/pwm/pwm-bfin.c
@@ -118,10 +118,8 @@ static int bfin_pwm_probe(struct platform_device *pdev)
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (!pwm) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!pwm)
return -ENOMEM;
- }
platform_set_drvdata(pdev, pwm);
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index f6ca4e8c6253..9c13694eaa24 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -75,8 +75,8 @@ static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index,
msg->version = 0;
msg->command = EC_CMD_PWM_GET_DUTY;
- msg->insize = sizeof(*params);
- msg->outsize = sizeof(*resp);
+ msg->insize = sizeof(*resp);
+ msg->outsize = sizeof(*params);
params->pwm_type = EC_PWM_TYPE_GENERIC;
params->index = index;
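
The cros-ec fix is purely directional: for an EC host command, outsize is how many parameter bytes the host sends and insize is how many response bytes it expects back, so a get-duty request sends ec_params_pwm_get_duty and receives ec_response_pwm_get_duty:

    /* Buffer sizing convention for EC host commands:
     *   outsize: host -> EC request parameters
     *   insize:  EC -> host response payload
     */
    msg->outsize = sizeof(*params); /* struct ec_params_pwm_get_duty */
    msg->insize  = sizeof(*resp);   /* struct ec_response_pwm_get_duty */
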
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index d0e8f8542626..8dadc58d6cdf 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -165,7 +165,7 @@ static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
-static struct pwm_ops hibvt_pwm_ops = {
+static const struct pwm_ops hibvt_pwm_ops = {
.get_state = hibvt_pwm_get_state,
.apply = hibvt_pwm_apply,
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 045ef9fa6fe3..cb845edfe2b4 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -103,6 +103,7 @@ struct meson_pwm_channel {
struct meson_pwm_data {
const char * const *parent_names;
+ unsigned int num_parents;
};
struct meson_pwm {
@@ -162,7 +163,8 @@ static int meson_pwm_calc(struct meson_pwm *meson,
unsigned int duty, unsigned int period)
{
unsigned int pre_div, cnt, duty_cnt;
- unsigned long fin_freq = -1, fin_ns;
+ unsigned long fin_freq = -1;
+ u64 fin_ps;
if (~(meson->inverter_mask >> id) & 0x1)
duty = period - duty;
@@ -178,13 +180,15 @@ static int meson_pwm_calc(struct meson_pwm *meson,
}
dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
- fin_ns = NSEC_PER_SEC / fin_freq;
+ fin_ps = (u64)NSEC_PER_SEC * 1000;
+ do_div(fin_ps, fin_freq);
/* Calc pre_div with the period */
for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
- cnt = DIV_ROUND_CLOSEST(period, fin_ns * (pre_div + 1));
- dev_dbg(meson->chip.dev, "fin_ns=%lu pre_div=%u cnt=%u\n",
- fin_ns, pre_div, cnt);
+ cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000,
+ fin_ps * (pre_div + 1));
+ dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n",
+ fin_ps, pre_div, cnt);
if (cnt <= 0xffff)
break;
}
@@ -207,7 +211,8 @@ static int meson_pwm_calc(struct meson_pwm *meson,
channel->lo = cnt;
} else {
/* Then check if we can have the duty with the same pre_div */
- duty_cnt = DIV_ROUND_CLOSEST(duty, fin_ns * (pre_div + 1));
+ duty_cnt = DIV_ROUND_CLOSEST_ULL((u64)duty * 1000,
+ fin_ps * (pre_div + 1));
if (duty_cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get duty cycle\n");
return -EINVAL;
@@ -381,6 +386,7 @@ static const char * const pwm_meson8b_parent_names[] = {
static const struct meson_pwm_data pwm_meson8b_data = {
.parent_names = pwm_meson8b_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_meson8b_parent_names),
};
static const char * const pwm_gxbb_parent_names[] = {
@@ -389,11 +395,35 @@ static const char * const pwm_gxbb_parent_names[] = {
static const struct meson_pwm_data pwm_gxbb_data = {
.parent_names = pwm_gxbb_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_gxbb_parent_names),
+};
+
+/*
+ * Only the first 2 inputs of the GXBB AO PWMs are valid;
+ * the last 2 are grounded.
+ */
+static const char * const pwm_gxbb_ao_parent_names[] = {
+ "xtal", "clk81"
+};
+
+static const struct meson_pwm_data pwm_gxbb_ao_data = {
+ .parent_names = pwm_gxbb_ao_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_gxbb_ao_parent_names),
};
static const struct of_device_id meson_pwm_matches[] = {
- { .compatible = "amlogic,meson8b-pwm", .data = &pwm_meson8b_data },
- { .compatible = "amlogic,meson-gxbb-pwm", .data = &pwm_gxbb_data },
+ {
+ .compatible = "amlogic,meson8b-pwm",
+ .data = &pwm_meson8b_data
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-pwm",
+ .data = &pwm_gxbb_data
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-ao-pwm",
+ .data = &pwm_gxbb_ao_data
+ },
{},
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
@@ -417,7 +447,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
init.ops = &clk_mux_ops;
init.flags = CLK_IS_BASIC;
init.parent_names = meson->data->parent_names;
- init.num_parents = 1 << MISC_CLK_SEL_WIDTH;
+ init.num_parents = meson->data->num_parents;
channel->mux.reg = meson->base + REG_MISC_AB;
channel->mux.shift = mux_reg_shifts[i];
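
The meson fixed-point change avoids NSEC_PER_SEC / fin_freq truncating to zero once the parent clock exceeds 1 GHz; working in picoseconds keeps three extra digits of precision. A worked example, assuming fin_freq = 1.5 GHz and a 10000 ns period with pre_div = 0:

    /* old: fin_ns = 1000000000 / 1500000000    = 0    (breaks the math)
     * new: fin_ps = 1000000000000 / 1500000000 = 666
     *      cnt    = DIV_ROUND_CLOSEST_ULL(10000 * 1000, 666 * 1) = 15015
     */
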
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 1284ffa05921..6d23f1d1c9b7 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -8,8 +8,10 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -44,6 +46,10 @@
#define PWM_DTY_MASK GENMASK(15, 0)
+#define PWM_REG_PRD(reg) ((((reg) >> 16) & PWM_PRD_MASK) + 1)
+#define PWM_REG_DTY(reg) ((reg) & PWM_DTY_MASK)
+#define PWM_REG_PRESCAL(reg, chan) (((reg) >> ((chan) * PWMCH_OFFSET)) & PWM_PRESCAL_MASK)
+
#define BIT_CH(bit, chan) ((bit) << ((chan) * PWMCH_OFFSET))
static const u32 prescaler_table[] = {
@@ -77,6 +83,8 @@ struct sun4i_pwm_chip {
void __iomem *base;
spinlock_t ctrl_lock;
const struct sun4i_pwm_data *data;
+ unsigned long next_period[2];
+ bool needs_delay[2];
};
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
@@ -96,26 +104,65 @@ static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
writel(val, chip->base + offset);
}
-static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static void sun4i_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
- u32 prd, dty, val, clk_gate;
+ u64 clk_rate, tmp;
+ u32 val;
+ unsigned int prescaler;
+
+ clk_rate = clk_get_rate(sun4i_pwm->clk);
+
+ val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+
+ if ((val == PWM_PRESCAL_MASK) && sun4i_pwm->data->has_prescaler_bypass)
+ prescaler = 1;
+ else
+ prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
+
+ if (prescaler == 0)
+ return;
+
+ if (val & BIT_CH(PWM_ACT_STATE, pwm->hwpwm))
+ state->polarity = PWM_POLARITY_NORMAL;
+ else
+ state->polarity = PWM_POLARITY_INVERSED;
+
+ if (val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm))
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
+
+ tmp = prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
+
+ tmp = prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
+ state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
+}
+
+static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
+ struct pwm_state *state,
+ u32 *dty, u32 *prd, unsigned int *prsclr)
+{
u64 clk_rate, div = 0;
- unsigned int prescaler = 0;
- int err;
+ unsigned int pval, prescaler = 0;
clk_rate = clk_get_rate(sun4i_pwm->clk);
if (sun4i_pwm->data->has_prescaler_bypass) {
/* First, test without any prescaler when available */
prescaler = PWM_PRESCAL_MASK;
+ pval = 1;
/*
* When not using any prescaler, the clock period in nanoseconds
* is not an integer so round it half up instead of
* truncating to get less surprising values.
*/
- div = clk_rate * period_ns + NSEC_PER_SEC / 2;
+ div = clk_rate * state->period + NSEC_PER_SEC / 2;
do_div(div, NSEC_PER_SEC);
if (div - 1 > PWM_PRD_MASK)
prescaler = 0;
@@ -126,137 +173,141 @@ static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
if (!prescaler_table[prescaler])
continue;
+ pval = prescaler_table[prescaler];
div = clk_rate;
- do_div(div, prescaler_table[prescaler]);
- div = div * period_ns;
+ do_div(div, pval);
+ div = div * state->period;
do_div(div, NSEC_PER_SEC);
if (div - 1 <= PWM_PRD_MASK)
break;
}
- if (div - 1 > PWM_PRD_MASK) {
- dev_err(chip->dev, "period exceeds the maximum value\n");
+ if (div - 1 > PWM_PRD_MASK)
return -EINVAL;
- }
- }
-
- prd = div;
- div *= duty_ns;
- do_div(div, period_ns);
- dty = div;
-
- err = clk_prepare_enable(sun4i_pwm->clk);
- if (err) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
- return err;
- }
-
- spin_lock(&sun4i_pwm->ctrl_lock);
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
-
- if (sun4i_pwm->data->has_rdy && (val & PWM_RDY(pwm->hwpwm))) {
- spin_unlock(&sun4i_pwm->ctrl_lock);
- clk_disable_unprepare(sun4i_pwm->clk);
- return -EBUSY;
- }
-
- clk_gate = val & BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- if (clk_gate) {
- val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
}
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- val &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
- val |= BIT_CH(prescaler, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
-
- val = (dty & PWM_DTY_MASK) | PWM_PRD(prd);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+ *prd = div;
+ div *= state->duty_cycle;
+ do_div(div, state->period);
+ *dty = div;
+ *prsclr = prescaler;
- if (clk_gate) {
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- val |= clk_gate;
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
- }
+ div = (u64)pval * NSEC_PER_SEC * *prd;
+ state->period = DIV_ROUND_CLOSEST_ULL(div, clk_rate);
- spin_unlock(&sun4i_pwm->ctrl_lock);
- clk_disable_unprepare(sun4i_pwm->clk);
+ div = (u64)pval * NSEC_PER_SEC * *dty;
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(div, clk_rate);
return 0;
}
-static int sun4i_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity)
+static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
- u32 val;
+ struct pwm_state cstate;
+ u32 ctrl;
int ret;
+ unsigned int delay_us;
+ unsigned long now;
- ret = clk_prepare_enable(sun4i_pwm->clk);
- if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
- return ret;
+ pwm_get_state(pwm, &cstate);
+
+ if (!cstate.enabled) {
+ ret = clk_prepare_enable(sun4i_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
}
spin_lock(&sun4i_pwm->ctrl_lock);
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- if (polarity != PWM_POLARITY_NORMAL)
- val &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
- else
- val |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+ if ((cstate.period != state->period) ||
+ (cstate.duty_cycle != state->duty_cycle)) {
+ u32 period, duty, val;
+ unsigned int prescaler;
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+ ret = sun4i_pwm_calculate(sun4i_pwm, state,
+ &duty, &period, &prescaler);
+ if (ret) {
+ dev_err(chip->dev, "period exceeds the maximum value\n");
+ spin_unlock(&sun4i_pwm->ctrl_lock);
+ if (!cstate.enabled)
+ clk_disable_unprepare(sun4i_pwm->clk);
+ return ret;
+ }
- spin_unlock(&sun4i_pwm->ctrl_lock);
- clk_disable_unprepare(sun4i_pwm->clk);
+ if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
+ /* Prescaler changed, the clock has to be gated */
+ ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+ sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
- return 0;
-}
+ ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
+ ctrl |= BIT_CH(prescaler, pwm->hwpwm);
+ }
-static int sun4i_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
- u32 val;
- int ret;
+ val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
+ sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+ sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
+ usecs_to_jiffies(cstate.period / 1000 + 1);
+ sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+ }
- ret = clk_prepare_enable(sun4i_pwm->clk);
- if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
- return ret;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+ else
+ ctrl |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+
+ ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+ if (state->enabled) {
+ ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
+ } else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
+ ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
+ ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
}
- spin_lock(&sun4i_pwm->ctrl_lock);
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- val |= BIT_CH(PWM_EN, pwm->hwpwm);
- val |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+ sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+
spin_unlock(&sun4i_pwm->ctrl_lock);
- return 0;
-}
+ if (state->enabled)
+ return 0;
-static void sun4i_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
- u32 val;
+ if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
+ clk_disable_unprepare(sun4i_pwm->clk);
+ return 0;
+ }
+
+ /* We need a full period to elapse before disabling the channel. */
+ now = jiffies;
+ if (sun4i_pwm->needs_delay[pwm->hwpwm] &&
+ time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
+ delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] -
+ now);
+ if ((delay_us / 500) > MAX_UDELAY_MS)
+ msleep(delay_us / 1000 + 1);
+ else
+ usleep_range(delay_us, delay_us * 2);
+ }
+ sun4i_pwm->needs_delay[pwm->hwpwm] = false;
spin_lock(&sun4i_pwm->ctrl_lock);
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- val &= ~BIT_CH(PWM_EN, pwm->hwpwm);
- val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+ ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+ ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
+ sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
spin_unlock(&sun4i_pwm->ctrl_lock);
clk_disable_unprepare(sun4i_pwm->clk);
+
+ return 0;
}
static const struct pwm_ops sun4i_pwm_ops = {
- .config = sun4i_pwm_config,
- .set_polarity = sun4i_pwm_set_polarity,
- .enable = sun4i_pwm_enable,
- .disable = sun4i_pwm_disable,
+ .apply = sun4i_pwm_apply,
+ .get_state = sun4i_pwm_get_state,
.owner = THIS_MODULE,
};
@@ -316,8 +367,7 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm;
struct resource *res;
- u32 val;
- int i, ret;
+ int ret;
const struct of_device_id *match;
match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
@@ -353,24 +403,7 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pwm);
- ret = clk_prepare_enable(pwm->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable PWM clock\n");
- goto clk_error;
- }
-
- val = sun4i_pwm_readl(pwm, PWM_CTRL_REG);
- for (i = 0; i < pwm->chip.npwm; i++)
- if (!(val & BIT_CH(PWM_ACT_STATE, i)))
- pwm_set_polarity(&pwm->chip.pwms[i],
- PWM_POLARITY_INVERSED);
- clk_disable_unprepare(pwm->clk);
-
return 0;
-
-clk_error:
- pwmchip_remove(&pwm->chip);
- return ret;
}
static int sun4i_pwm_remove(struct platform_device *pdev)
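
After the atomic conversion, sun4i_pwm_get_state() reconstructs the PWM state from hardware: with prescaler p, clock rate f and the decoded period count PWM_REG_PRD(val), period_ns = p * NSEC_PER_SEC * PWM_REG_PRD(val) / f, rounded to closest. A worked example, assuming a 24 MHz clock, prescaler 120 and a decoded period count of 200:

    /* 120 * 1e9 * 200 / 24e6 = 1000000 ns, i.e. a 1 ms period. */
    u64 tmp = 120ULL * NSEC_PER_SEC * 200;
    unsigned long period_ns = DIV_ROUND_CLOSEST_ULL(tmp, 24000000);
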
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 8c6ed556db28..e9b33f09ff09 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -41,6 +41,9 @@
struct tegra_pwm_soc {
unsigned int num_channels;
+
+ /* Maximum IP frequency for given SoCs */
+ unsigned long max_frequency;
};
struct tegra_pwm_chip {
@@ -201,7 +204,18 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
- /* Read PWM clock rate from source */
+ /* Set maximum frequency of the IP */
+ ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The requested and configured frequency may differ due to
+ * clock register resolutions. Get the configured frequency
+ * so that PWM period can be calculated more accurately.
+ */
pwm->clk_rate = clk_get_rate(pwm->clk);
pwm->rst = devm_reset_control_get(&pdev->dev, "pwm");
@@ -273,10 +287,12 @@ static int tegra_pwm_resume(struct device *dev)
static const struct tegra_pwm_soc tegra20_pwm_soc = {
.num_channels = 4,
+ .max_frequency = 48000000UL,
};
static const struct tegra_pwm_soc tegra186_pwm_soc = {
.num_channels = 1,
+ .max_frequency = 102000000UL,
};
static const struct of_device_id tegra_pwm_of_match[] = {
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8d3b95728326..72419ac2c52a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -77,6 +77,14 @@ config RTC_DEBUG
Say yes here to enable debugging support in the RTC framework
and individual RTC drivers.
+config RTC_NVMEM
+ bool "RTC non volatile storage support"
+ select NVMEM
+ default RTC_CLASS
+ help
+ Say yes here to add support for the non-volatile (often
+ battery-backed) storage present on RTCs.
+
comment "RTC interfaces"
config RTC_INTF_SYSFS
@@ -197,6 +205,17 @@ config RTC_DRV_AC100
This driver can also be built as a module. If so, the module
will be called rtc-ac100.
+config RTC_DRV_BRCMSTB
+ tristate "Broadcom STB wake-timer"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ help
+ If you say yes here you get support for the wake-timer found on
+ Broadcom STB SoCs (BCM7xxx).
+
+ This driver can also be built as a module. If so, the module will
+ be called rtc-brcmstb-waketimer.
+
config RTC_DRV_AS3722
tristate "ams AS3722 RTC driver"
depends on MFD_AS3722
@@ -791,6 +810,14 @@ config RTC_DRV_DS3232
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
+config RTC_DRV_DS3232_HWMON
+ bool "HWMON support for Dallas/Maxim DS3232/DS3234"
+ depends on RTC_DRV_DS3232 && HWMON && !(RTC_DRV_DS3232=y && HWMON=m)
+ default y
+ help
+ Say Y here if you want to expose temperature sensor data on
+ rtc-ds3232.
+
config RTC_DRV_PCF2127
tristate "NXP PCF2127"
depends on RTC_I2C_AND_SPI
@@ -1484,16 +1511,16 @@ config RTC_DRV_ARMADA38X
This driver can also be built as a module. If so, the module
will be called armada38x-rtc.
-config RTC_DRV_GEMINI
- tristate "Gemini SoC RTC"
- depends on ARCH_GEMINI || COMPILE_TEST
+config RTC_DRV_FTRTC010
+ tristate "Faraday Technology FTRTC010 RTC"
depends on HAS_IOMEM
+ default ARCH_GEMINI
help
If you say Y here you will get support for the
- RTC found on Gemini SoC's.
+ Faraday Technology FTRTC010 found on e.g. Gemini SoCs.
This driver can also be built as a module. If so, the module
- will be called rtc-gemini.
+ will be called rtc-ftrtc010.
config RTC_DRV_PS3
tristate "PS3 RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 13857d2fce09..acd366b41c85 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,6 +15,7 @@ ifdef CONFIG_RTC_DRV_EFI
rtc-core-y += rtc-efi-platform.o
endif
+rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o
rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_BRCMSTB) += rtc-brcmstb-waketimer.o
obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
@@ -67,7 +69,7 @@ obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o
obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
-obj-$(CONFIG_RTC_DRV_GEMINI) += rtc-gemini.o
+obj-$(CONFIG_RTC_DRV_FTRTC010) += rtc-ftrtc010.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5fb439897fe1..2ed970d61da1 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -150,59 +150,19 @@ static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
#define RTC_CLASS_DEV_PM_OPS NULL
#endif
-
-/**
- * rtc_device_register - register w/ RTC class
- * @dev: the device to register
- *
- * rtc_device_unregister() must be called when the class device is no
- * longer needed.
- *
- * Returns the pointer to the new struct class device.
- */
-struct rtc_device *rtc_device_register(const char *name, struct device *dev,
- const struct rtc_class_ops *ops,
- struct module *owner)
+/* Ensure the caller will set the id before releasing the device */
+static struct rtc_device *rtc_allocate_device(void)
{
struct rtc_device *rtc;
- struct rtc_wkalrm alrm;
- int of_id = -1, id = -1, err;
-
- if (dev->of_node)
- of_id = of_alias_get_id(dev->of_node, "rtc");
- else if (dev->parent && dev->parent->of_node)
- of_id = of_alias_get_id(dev->parent->of_node, "rtc");
- if (of_id >= 0) {
- id = ida_simple_get(&rtc_ida, of_id, of_id + 1,
- GFP_KERNEL);
- if (id < 0)
- dev_warn(dev, "/aliases ID %d not available\n",
- of_id);
- }
-
- if (id < 0) {
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- err = id;
- goto exit;
- }
- }
-
- rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
- if (rtc == NULL) {
- err = -ENOMEM;
- goto exit_ida;
- }
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return NULL;
device_initialize(&rtc->dev);
- rtc->id = id;
- rtc->ops = ops;
- rtc->owner = owner;
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
- rtc->dev.parent = dev;
rtc->dev.class = rtc_class;
rtc->dev.groups = rtc_get_dev_attribute_groups();
rtc->dev.release = rtc_device_release;
@@ -224,7 +184,64 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
- strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
+ return rtc;
+}
+
+static int rtc_device_get_id(struct device *dev)
+{
+ int of_id = -1, id = -1;
+
+ if (dev->of_node)
+ of_id = of_alias_get_id(dev->of_node, "rtc");
+ else if (dev->parent && dev->parent->of_node)
+ of_id = of_alias_get_id(dev->parent->of_node, "rtc");
+
+ if (of_id >= 0) {
+ id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL);
+ if (id < 0)
+ dev_warn(dev, "/aliases ID %d not available\n", of_id);
+ }
+
+ if (id < 0)
+ id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+
+ return id;
+}
+
+/**
+ * rtc_device_register - register w/ RTC class
+ * @dev: the device to register
+ *
+ * rtc_device_unregister() must be called when the class device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new struct class device.
+ */
+struct rtc_device *rtc_device_register(const char *name, struct device *dev,
+ const struct rtc_class_ops *ops,
+ struct module *owner)
+{
+ struct rtc_device *rtc;
+ struct rtc_wkalrm alrm;
+ int id, err;
+
+ id = rtc_device_get_id(dev);
+ if (id < 0) {
+ err = id;
+ goto exit;
+ }
+
+ rtc = rtc_allocate_device();
+ if (!rtc) {
+ err = -ENOMEM;
+ goto exit_ida;
+ }
+
+ rtc->id = id;
+ rtc->ops = ops;
+ rtc->owner = owner;
+ rtc->dev.parent = dev;
+
dev_set_name(&rtc->dev, "rtc%d", id);
/* Check to see if there is an ALARM already set in hw */
@@ -238,20 +255,20 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
err = cdev_device_add(&rtc->char_dev, &rtc->dev);
if (err) {
dev_warn(&rtc->dev, "%s: failed to add char device %d:%d\n",
- rtc->name, MAJOR(rtc->dev.devt), rtc->id);
+ name, MAJOR(rtc->dev.devt), rtc->id);
/* This will free both memory and the ID */
put_device(&rtc->dev);
goto exit;
} else {
- dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", rtc->name,
+ dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", name,
MAJOR(rtc->dev.devt), rtc->id);
}
rtc_proc_add_device(rtc);
dev_info(dev, "rtc core: registered %s as %s\n",
- rtc->name, dev_name(&rtc->dev));
+ name, dev_name(&rtc->dev));
return rtc;
@@ -273,6 +290,8 @@ EXPORT_SYMBOL_GPL(rtc_device_register);
*/
void rtc_device_unregister(struct rtc_device *rtc)
{
+ rtc_nvmem_unregister(rtc);
+
mutex_lock(&rtc->ops_lock);
/*
* Remove innards of this RTC, then disable it, before
@@ -356,6 +375,91 @@ void devm_rtc_device_unregister(struct device *dev, struct rtc_device *rtc)
}
EXPORT_SYMBOL_GPL(devm_rtc_device_unregister);
+static void devm_rtc_release_device(struct device *dev, void *res)
+{
+ struct rtc_device *rtc = *(struct rtc_device **)res;
+
+ if (rtc->registered)
+ rtc_device_unregister(rtc);
+ else
+ put_device(&rtc->dev);
+}
+
+struct rtc_device *devm_rtc_allocate_device(struct device *dev)
+{
+ struct rtc_device **ptr, *rtc;
+ int id, err;
+
+ id = rtc_device_get_id(dev);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto exit_ida;
+ }
+
+ rtc = rtc_allocate_device();
+ if (!rtc) {
+ err = -ENOMEM;
+ goto exit_devres;
+ }
+
+ *ptr = rtc;
+ devres_add(dev, ptr);
+
+ rtc->id = id;
+ rtc->dev.parent = dev;
+ dev_set_name(&rtc->dev, "rtc%d", id);
+
+ return rtc;
+
+exit_devres:
+ devres_free(ptr);
+exit_ida:
+ ida_simple_remove(&rtc_ida, id);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devm_rtc_allocate_device);
+
+int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alrm;
+ int err;
+
+ if (!rtc->ops)
+ return -EINVAL;
+
+ rtc->owner = owner;
+
+ /* Check to see if there is an ALARM already set in hw */
+ err = __rtc_read_alarm(rtc, &alrm);
+ if (!err && !rtc_valid_tm(&alrm.time))
+ rtc_initialize_alarm(rtc, &alrm);
+
+ rtc_dev_prepare(rtc);
+
+ err = cdev_device_add(&rtc->char_dev, &rtc->dev);
+ if (err)
+ dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n",
+ MAJOR(rtc->dev.devt), rtc->id);
+ else
+ dev_dbg(rtc->dev.parent, "char device (%d:%d)\n",
+ MAJOR(rtc->dev.devt), rtc->id);
+
+ rtc_proc_add_device(rtc);
+
+ rtc_nvmem_register(rtc);
+
+ rtc->registered = true;
+ dev_info(rtc->dev.parent, "registered as %s\n",
+ dev_name(&rtc->dev));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__rtc_register_device);
+
static int __init rtc_init(void)
{
rtc_class = class_create(THIS_MODULE, "rtc");
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index fc0fa7577636..8cec9a02c0b8 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -227,6 +227,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
missing = year;
}
+ /* Can't proceed if alarm is still invalid after replacing
+ * missing fields.
+ */
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
+ goto done;
+
/* with luck, no rollover is needed */
t_now = rtc_tm_to_time64(&now);
t_alm = rtc_tm_to_time64(&alarm->time);
@@ -278,9 +285,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
dev_warn(&rtc->dev, "alarm rollover not handled\n");
}
-done:
err = rtc_valid_tm(&alarm->time);
+done:
if (err) {
dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
new file mode 100644
index 000000000000..8567b4ed9ac6
--- /dev/null
+++ b/drivers/rtc/nvmem.c
@@ -0,0 +1,113 @@
+/*
+ * RTC subsystem, nvmem interface
+ *
+ * Copyright (C) 2017 Alexandre Belloni
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/rtc.h>
+#include <linux/sysfs.h>
+
+#include "rtc-core.h"
+
+/*
+ * Deprecated ABI compatibility, this should be removed at some point
+ */
+
+static const char nvram_warning[] = "Deprecated ABI, please use nvmem";
+
+static ssize_t
+rtc_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct rtc_device *rtc = attr->private;
+
+ dev_warn_once(kobj_to_dev(kobj), nvram_warning);
+
+ return nvmem_device_read(rtc->nvmem, off, count, buf);
+}
+
+static ssize_t
+rtc_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct rtc_device *rtc = attr->private;
+
+ dev_warn_once(kobj_to_dev(kobj), nvram_warning);
+
+ return nvmem_device_write(rtc->nvmem, off, count, buf);
+}
+
+static int rtc_nvram_register(struct rtc_device *rtc)
+{
+ int err;
+
+ rtc->nvram = devm_kzalloc(rtc->dev.parent,
+ sizeof(struct bin_attribute),
+ GFP_KERNEL);
+ if (!rtc->nvram)
+ return -ENOMEM;
+
+ rtc->nvram->attr.name = "nvram";
+ rtc->nvram->attr.mode = 0644;
+ rtc->nvram->private = rtc;
+
+ sysfs_bin_attr_init(rtc->nvram);
+
+ rtc->nvram->read = rtc_nvram_read;
+ rtc->nvram->write = rtc_nvram_write;
+ rtc->nvram->size = rtc->nvmem_config->size;
+
+ err = sysfs_create_bin_file(&rtc->dev.parent->kobj,
+ rtc->nvram);
+ if (err) {
+ devm_kfree(rtc->dev.parent, rtc->nvram);
+ rtc->nvram = NULL;
+ }
+
+ return err;
+}
+
+static void rtc_nvram_unregister(struct rtc_device *rtc)
+{
+ sysfs_remove_bin_file(&rtc->dev.parent->kobj, rtc->nvram);
+}
+
+/*
+ * New ABI, uses nvmem
+ */
+void rtc_nvmem_register(struct rtc_device *rtc)
+{
+ if (!rtc->nvmem_config)
+ return;
+
+ rtc->nvmem_config->dev = &rtc->dev;
+ rtc->nvmem_config->owner = rtc->owner;
+ rtc->nvmem = nvmem_register(rtc->nvmem_config);
+ if (IS_ERR_OR_NULL(rtc->nvmem))
+ return;
+
+ /* Register the old ABI */
+ if (rtc->nvram_old_abi)
+ rtc_nvram_register(rtc);
+}
+
+void rtc_nvmem_unregister(struct rtc_device *rtc)
+{
+ if (IS_ERR_OR_NULL(rtc->nvmem))
+ return;
+
+ /* unregister the old ABI */
+ if (rtc->nvram)
+ rtc_nvram_unregister(rtc);
+
+ nvmem_unregister(rtc->nvmem);
+}
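
A driver opts into the new storage interface by filling an nvmem_config and pointing rtc->nvmem_config at it before registration; __rtc_register_device() then calls rtc_nvmem_register(), and setting nvram_old_abi additionally keeps the legacy sysfs "nvram" file. A hedged sketch, with all names and sizes illustrative:

    /* Hypothetical opt-in from a driver's probe(); the accessors and
     * size are driver-provided and illustrative only.
     */
    static struct nvmem_config foo_nvmem_config = {
            .name      = "foo_nvram",
            .word_size = 1,
            .stride    = 1,
            .size      = 64,
            .reg_read  = foo_nvram_read,
            .reg_write = foo_nvram_write,
    };

    rtc->nvmem_config  = &foo_nvmem_config;
    rtc->nvram_old_abi = true;  /* also expose the deprecated "nvram" file */
    ret = rtc_register_device(rtc);
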
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b60fd477778f..e221b78b6f10 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -409,6 +409,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+ platform_set_drvdata(pdev, rtc);
+
sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sclk))
return PTR_ERR(sclk);
@@ -441,13 +446,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &at91_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
+ rtc->ops = &at91_rtc_ops;
+ ret = rtc_register_device(rtc);
+ if (ret)
goto err_clk;
- }
- platform_set_drvdata(pdev, rtc);
/* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
* completion.
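
The at91rm9200 hunks above are the recurring allocate-early/register-late conversion in this series. A minimal sketch of the two styles, with names illustrative:

	/* old, single-step: device becomes visible mid-probe */
	rtc = devm_rtc_device_register(dev, name, &ops, THIS_MODULE);

	/* new, two-step: allocate first so drvdata and the rtc fields
	 * are valid, register only once the hardware is fully set up */
	rtc = devm_rtc_allocate_device(dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);
	rtc->ops = &ops;
	ret = rtc_register_device(rtc);

Moving platform_set_drvdata() ahead of the clock setup means anything that fires as soon as the device registers can already dereference the drvdata.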
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
new file mode 100644
index 000000000000..796ac792a381
--- /dev/null
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_wakeup.h>
+#include <linux/reboot.h>
+#include <linux/rtc.h>
+#include <linux/stat.h>
+#include <linux/suspend.h>
+
+struct brcmstb_waketmr {
+ struct rtc_device *rtc;
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ struct notifier_block reboot_notifier;
+ struct clk *clk;
+ u32 rate;
+};
+
+#define BRCMSTB_WKTMR_EVENT 0x00
+#define BRCMSTB_WKTMR_COUNTER 0x04
+#define BRCMSTB_WKTMR_ALARM 0x08
+#define BRCMSTB_WKTMR_PRESCALER 0x0C
+#define BRCMSTB_WKTMR_PRESCALER_VAL 0x10
+
+#define BRCMSTB_WKTMR_DEFAULT_FREQ 27000000
+
+static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
+{
+ writel_relaxed(1, timer->base + BRCMSTB_WKTMR_EVENT);
+ (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
+}
+
+static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
+ unsigned int secs)
+{
+ brcmstb_waketmr_clear_alarm(timer);
+
+ writel_relaxed(secs + 1, timer->base + BRCMSTB_WKTMR_ALARM);
+}
+
+static irqreturn_t brcmstb_waketmr_irq(int irq, void *data)
+{
+ struct brcmstb_waketmr *timer = data;
+
+ pm_wakeup_event(timer->dev, 0);
+
+ return IRQ_HANDLED;
+}
+
+struct wktmr_time {
+ u32 sec;
+ u32 pre;
+};
+
+static void wktmr_read(struct brcmstb_waketmr *timer,
+ struct wktmr_time *t)
+{
+ u32 tmp;
+
+ do {
+ t->sec = readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER);
+ tmp = readl_relaxed(timer->base + BRCMSTB_WKTMR_PRESCALER_VAL);
+ } while (tmp >= timer->rate);
+
+ t->pre = timer->rate - tmp;
+}
+
+static int brcmstb_waketmr_prepare_suspend(struct brcmstb_waketmr *timer)
+{
+ struct device *dev = timer->dev;
+ int ret = 0;
+
+ if (device_may_wakeup(dev)) {
+ ret = enable_irq_wake(timer->irq);
+ if (ret) {
+ dev_err(dev, "failed to enable wake-up interrupt\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/* If enabled as a wakeup-source, arm the timer when powering off */
+static int brcmstb_waketmr_reboot(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct brcmstb_waketmr *timer;
+
+ timer = container_of(nb, struct brcmstb_waketmr, reboot_notifier);
+
+ /* Set timer for cold boot */
+ if (action == SYS_POWER_OFF)
+ brcmstb_waketmr_prepare_suspend(timer);
+
+ return NOTIFY_DONE;
+}
+
+static int brcmstb_waketmr_gettime(struct device *dev,
+ struct rtc_time *tm)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+ struct wktmr_time now;
+
+ wktmr_read(timer, &now);
+
+ rtc_time_to_tm(now.sec, tm);
+
+ return 0;
+}
+
+static int brcmstb_waketmr_settime(struct device *dev,
+ struct rtc_time *tm)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+ time64_t sec;
+
+ sec = rtc_tm_to_time64(tm);
+
+ if (sec > U32_MAX || sec < 0)
+ return -EINVAL;
+
+ writel_relaxed(sec, timer->base + BRCMSTB_WKTMR_COUNTER);
+
+ return 0;
+}
+
+static int brcmstb_waketmr_getalarm(struct device *dev,
+ struct rtc_wkalrm *alarm)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+ time64_t sec;
+ u32 reg;
+
+ sec = readl_relaxed(timer->base + BRCMSTB_WKTMR_ALARM);
+ if (sec != 0) {
+ /* Alarm is enabled */
+ alarm->enabled = 1;
+ rtc_time64_to_tm(sec, &alarm->time);
+ }
+
+ reg = readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
+ alarm->pending = !!(reg & 1);
+
+ return 0;
+}
+
+static int brcmstb_waketmr_setalarm(struct device *dev,
+ struct rtc_wkalrm *alarm)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+ time64_t sec;
+
+ if (alarm->enabled)
+ sec = rtc_tm_to_time64(&alarm->time);
+ else
+ sec = 0;
+
+ if (sec > U32_MAX || sec < 0)
+ return -EINVAL;
+
+ brcmstb_waketmr_set_alarm(timer, sec);
+
+ return 0;
+}
+
+/*
+ * Does not do much but keep the RTC class happy. We always support
+ * alarms.
+ */
+static int brcmstb_waketmr_alarm_enable(struct device *dev,
+ unsigned int enabled)
+{
+ return 0;
+}
+
+static const struct rtc_class_ops brcmstb_waketmr_ops = {
+ .read_time = brcmstb_waketmr_gettime,
+ .set_time = brcmstb_waketmr_settime,
+ .read_alarm = brcmstb_waketmr_getalarm,
+ .set_alarm = brcmstb_waketmr_setalarm,
+ .alarm_irq_enable = brcmstb_waketmr_alarm_enable,
+};
+
+static int brcmstb_waketmr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmstb_waketmr *timer;
+ struct resource *res;
+ int ret;
+
+ timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
+ if (!timer)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, timer);
+ timer->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ timer->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(timer->base))
+ return PTR_ERR(timer->base);
+
+ /*
+ * Set wakeup capability before requesting wakeup interrupt, so we can
+ * process boot-time "wakeups" (e.g., from S5 soft-off)
+ */
+ device_set_wakeup_capable(dev, true);
+ device_wakeup_enable(dev);
+
+ timer->irq = platform_get_irq(pdev, 0);
+ if (timer->irq < 0)
+ return -ENODEV;
+
+ timer->clk = devm_clk_get(dev, NULL);
+ if (!IS_ERR(timer->clk)) {
+ ret = clk_prepare_enable(timer->clk);
+ if (ret)
+ return ret;
+ timer->rate = clk_get_rate(timer->clk);
+ if (!timer->rate)
+ timer->rate = BRCMSTB_WKTMR_DEFAULT_FREQ;
+ } else {
+ timer->rate = BRCMSTB_WKTMR_DEFAULT_FREQ;
+ timer->clk = NULL;
+ }
+
+ ret = devm_request_irq(dev, timer->irq, brcmstb_waketmr_irq, 0,
+ "brcmstb-waketimer", timer);
+ if (ret < 0)
+ return ret;
+
+ timer->reboot_notifier.notifier_call = brcmstb_waketmr_reboot;
+ register_reboot_notifier(&timer->reboot_notifier);
+
+ timer->rtc = rtc_device_register("brcmstb-waketmr", dev,
+ &brcmstb_waketmr_ops, THIS_MODULE);
+ if (IS_ERR(timer->rtc)) {
+ dev_err(dev, "unable to register device\n");
+ unregister_reboot_notifier(&timer->reboot_notifier);
+ return PTR_ERR(timer->rtc);
+ }
+
+ dev_info(dev, "registered, with irq %d\n", timer->irq);
+
+ return ret;
+}
+
+static int brcmstb_waketmr_remove(struct platform_device *pdev)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev);
+
+ unregister_reboot_notifier(&timer->reboot_notifier);
+ rtc_device_unregister(timer->rtc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmstb_waketmr_suspend(struct device *dev)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+
+ return brcmstb_waketmr_prepare_suspend(timer);
+}
+
+static int brcmstb_waketmr_resume(struct device *dev)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+ int ret;
+
+ if (!device_may_wakeup(dev))
+ return 0;
+
+ ret = disable_irq_wake(timer->irq);
+
+ brcmstb_waketmr_clear_alarm(timer);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
+ brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
+
+static const struct of_device_id brcmstb_waketmr_of_match[] = {
+ { .compatible = "brcm,brcmstb-waketimer" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver brcmstb_waketmr_driver = {
+ .probe = brcmstb_waketmr_probe,
+ .remove = brcmstb_waketmr_remove,
+ .driver = {
+ .name = "brcmstb-waketimer",
+ .pm = &brcmstb_waketmr_pm_ops,
+ .of_match_table = of_match_ptr(brcmstb_waketmr_of_match),
+ }
+};
+module_platform_driver(brcmstb_waketmr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_AUTHOR("Markus Mayer");
+MODULE_DESCRIPTION("Wake-up timer driver for STB chips");
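
wktmr_read() above loops because the counter and prescaler registers cannot be latched in one access; it retries until the prescaler sample is in range, i.e. no rollover slipped in between the two reads. Purely as an illustration (not in the patch), a sample converts to nanoseconds like this:

	/* sketch only; needs <linux/math64.h> and <linux/time64.h> */
	static u64 wktmr_to_ns(struct brcmstb_waketmr *timer,
			       struct wktmr_time *t)
	{
		/* t->pre = elapsed prescaler ticks out of timer->rate */
		return (u64)t->sec * NSEC_PER_SEC +
		       div_u64((u64)t->pre * NSEC_PER_SEC, timer->rate);
	}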
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 7a4ed2f7c7d7..ecab76a3207c 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -45,3 +45,11 @@ static inline const struct attribute_group **rtc_get_dev_attribute_groups(void)
return NULL;
}
#endif
+
+#ifdef CONFIG_RTC_NVMEM
+void rtc_nvmem_register(struct rtc_device *rtc);
+void rtc_nvmem_unregister(struct rtc_device *rtc);
+#else
+static inline void rtc_nvmem_register(struct rtc_device *rtc) {}
+static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
+#endif
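
This is the usual Kconfig stub idiom: with CONFIG_RTC_NVMEM disabled, callers compile against empty static inlines and the compiler discards the calls, so no call site needs its own #ifdef. The matching Kconfig entry is not in this hunk; it presumably reads roughly:

	config RTC_NVMEM
		bool "RTC non volatile storage support"
		select NVMEM
		default RTC_CLASS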
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index e81a8711fea7..794bc4fa4937 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -464,7 +464,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
return;
if (rtc->id >= RTC_DEV_MAX) {
- dev_dbg(&rtc->dev, "%s: too many RTC devices\n", rtc->name);
+ dev_dbg(&rtc->dev, "too many RTC devices\n");
return;
}
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 77339b3d50a1..4fac49e55d47 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -24,6 +24,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/clk-provider.h>
+#include <linux/regmap.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -33,6 +34,7 @@
*/
enum ds_type {
ds_1307,
+ ds_1308,
ds_1337,
ds_1338,
ds_1339,
@@ -43,6 +45,7 @@ enum ds_type {
m41t00,
mcp794xx,
rx_8025,
+ rx_8130,
last_ds_type /* always last */
/* rs5c372 too? different address... */
};
@@ -115,17 +118,16 @@ struct ds1307 {
u8 offset; /* register's offset */
u8 regs[11];
u16 nvram_offset;
- struct bin_attribute *nvram;
+ struct nvmem_config nvmem_cfg;
enum ds_type type;
unsigned long flags;
#define HAS_NVRAM 0 /* bit 0 == sysfs file active */
#define HAS_ALARM 1 /* bit 1 == irq claimed */
- struct i2c_client *client;
+ struct device *dev;
+ struct regmap *regmap;
+ const char *name;
+ int irq;
struct rtc_device *rtc;
- s32 (*read_block_data)(const struct i2c_client *client, u8 command,
- u8 length, u8 *values);
- s32 (*write_block_data)(const struct i2c_client *client, u8 command,
- u8 length, const u8 *values);
#ifdef CONFIG_COMMON_CLK
struct clk_hw clks[2];
#endif
@@ -135,21 +137,30 @@ struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
+ u8 century_reg;
+ u8 century_enable_bit;
+ u8 century_bit;
u16 trickle_charger_reg;
u8 trickle_charger_setup;
- u8 (*do_trickle_setup)(struct i2c_client *, uint32_t, bool);
+ u8 (*do_trickle_setup)(struct ds1307 *, uint32_t,
+ bool);
};
-static u8 do_trickle_setup_ds1339(struct i2c_client *,
- uint32_t ohms, bool diode);
+static u8 do_trickle_setup_ds1339(struct ds1307 *, uint32_t ohms, bool diode);
static struct chip_desc chips[last_ds_type] = {
[ds_1307] = {
.nvram_offset = 8,
.nvram_size = 56,
},
+ [ds_1308] = {
+ .nvram_offset = 8,
+ .nvram_size = 56,
+ },
[ds_1337] = {
.alarm = 1,
+ .century_reg = DS1307_REG_MONTH,
+ .century_bit = DS1337_BIT_CENTURY,
},
[ds_1338] = {
.nvram_offset = 8,
@@ -157,10 +168,15 @@ static struct chip_desc chips[last_ds_type] = {
},
[ds_1339] = {
.alarm = 1,
+ .century_reg = DS1307_REG_MONTH,
+ .century_bit = DS1337_BIT_CENTURY,
.trickle_charger_reg = 0x10,
.do_trickle_setup = &do_trickle_setup_ds1339,
},
[ds_1340] = {
+ .century_reg = DS1307_REG_HOUR,
+ .century_enable_bit = DS1340_BIT_CENTURY_EN,
+ .century_bit = DS1340_BIT_CENTURY,
.trickle_charger_reg = 0x08,
},
[ds_1388] = {
@@ -168,6 +184,14 @@ static struct chip_desc chips[last_ds_type] = {
},
[ds_3231] = {
.alarm = 1,
+ .century_reg = DS1307_REG_MONTH,
+ .century_bit = DS1337_BIT_CENTURY,
+ },
+ [rx_8130] = {
+ .alarm = 1,
+ /* this is battery-backed SRAM */
+ .nvram_offset = 0x20,
+ .nvram_size = 4, /* 32 bits (4 x 8-bit words) */
},
[mcp794xx] = {
.alarm = 1,
@@ -179,6 +203,7 @@ static struct chip_desc chips[last_ds_type] = {
static const struct i2c_device_id ds1307_id[] = {
{ "ds1307", ds_1307 },
+ { "ds1308", ds_1308 },
{ "ds1337", ds_1337 },
{ "ds1338", ds_1338 },
{ "ds1339", ds_1339 },
@@ -192,6 +217,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ "isl12057", ds_1337 },
+ { "rx8130", rx_8130 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
@@ -203,6 +229,10 @@ static const struct of_device_id ds1307_of_match[] = {
.data = (void *)ds_1307
},
{
+ .compatible = "dallas,ds1308",
+ .data = (void *)ds_1308
+ },
+ {
.compatible = "dallas,ds1337",
.data = (void *)ds_1337
},
@@ -262,6 +292,7 @@ MODULE_DEVICE_TABLE(of, ds1307_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id ds1307_acpi_ids[] = {
{ .id = "DS1307", .driver_data = ds_1307 },
+ { .id = "DS1308", .driver_data = ds_1308 },
{ .id = "DS1337", .driver_data = ds_1337 },
{ .id = "DS1338", .driver_data = ds_1338 },
{ .id = "DS1339", .driver_data = ds_1339 },
@@ -280,136 +311,6 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
#endif
-/*----------------------------------------------------------------------*/
-
-#define BLOCK_DATA_MAX_TRIES 10
-
-static s32 ds1307_read_block_data_once(const struct i2c_client *client,
- u8 command, u8 length, u8 *values)
-{
- s32 i, data;
-
- for (i = 0; i < length; i++) {
- data = i2c_smbus_read_byte_data(client, command + i);
- if (data < 0)
- return data;
- values[i] = data;
- }
- return i;
-}
-
-static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
- u8 length, u8 *values)
-{
- u8 oldvalues[255];
- s32 ret;
- int tries = 0;
-
- dev_dbg(&client->dev, "ds1307_read_block_data (length=%d)\n", length);
- ret = ds1307_read_block_data_once(client, command, length, values);
- if (ret < 0)
- return ret;
- do {
- if (++tries > BLOCK_DATA_MAX_TRIES) {
- dev_err(&client->dev,
- "ds1307_read_block_data failed\n");
- return -EIO;
- }
- memcpy(oldvalues, values, length);
- ret = ds1307_read_block_data_once(client, command, length,
- values);
- if (ret < 0)
- return ret;
- } while (memcmp(oldvalues, values, length));
- return length;
-}
-
-static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
- u8 length, const u8 *values)
-{
- u8 currvalues[255];
- int tries = 0;
-
- dev_dbg(&client->dev, "ds1307_write_block_data (length=%d)\n", length);
- do {
- s32 i, ret;
-
- if (++tries > BLOCK_DATA_MAX_TRIES) {
- dev_err(&client->dev,
- "ds1307_write_block_data failed\n");
- return -EIO;
- }
- for (i = 0; i < length; i++) {
- ret = i2c_smbus_write_byte_data(client, command + i,
- values[i]);
- if (ret < 0)
- return ret;
- }
- ret = ds1307_read_block_data_once(client, command, length,
- currvalues);
- if (ret < 0)
- return ret;
- } while (memcmp(currvalues, values, length));
- return length;
-}
-
-/*----------------------------------------------------------------------*/
-
-/* These RTC devices are not designed to be connected to a SMbus adapter.
- SMbus limits block operations length to 32 bytes, whereas it's not
- limited on I2C buses. As a result, accesses may exceed 32 bytes;
- in that case, split them into smaller blocks */
-
-static s32 ds1307_native_smbus_write_block_data(const struct i2c_client *client,
- u8 command, u8 length, const u8 *values)
-{
- u8 suboffset = 0;
-
- if (length <= I2C_SMBUS_BLOCK_MAX) {
- s32 retval = i2c_smbus_write_i2c_block_data(client,
- command, length, values);
- if (retval < 0)
- return retval;
- return length;
- }
-
- while (suboffset < length) {
- s32 retval = i2c_smbus_write_i2c_block_data(client,
- command + suboffset,
- min(I2C_SMBUS_BLOCK_MAX, length - suboffset),
- values + suboffset);
- if (retval < 0)
- return retval;
-
- suboffset += I2C_SMBUS_BLOCK_MAX;
- }
- return length;
-}
-
-static s32 ds1307_native_smbus_read_block_data(const struct i2c_client *client,
- u8 command, u8 length, u8 *values)
-{
- u8 suboffset = 0;
-
- if (length <= I2C_SMBUS_BLOCK_MAX)
- return i2c_smbus_read_i2c_block_data(client,
- command, length, values);
-
- while (suboffset < length) {
- s32 retval = i2c_smbus_read_i2c_block_data(client,
- command + suboffset,
- min(I2C_SMBUS_BLOCK_MAX, length - suboffset),
- values + suboffset);
- if (retval < 0)
- return retval;
-
- suboffset += I2C_SMBUS_BLOCK_MAX;
- }
- return length;
-}
-
-/*----------------------------------------------------------------------*/
-
/*
* The ds1337 and ds1339 both have two alarms, but we only use the first
* one (with a "seconds" field). For ds1337 we expect nINTA is our alarm
@@ -417,27 +318,24 @@ static s32 ds1307_native_smbus_read_block_data(const struct i2c_client *client,
*/
static irqreturn_t ds1307_irq(int irq, void *dev_id)
{
- struct i2c_client *client = dev_id;
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct ds1307 *ds1307 = dev_id;
struct mutex *lock = &ds1307->rtc->ops_lock;
- int stat, control;
+ int stat, ret;
mutex_lock(lock);
- stat = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
- if (stat < 0)
+ ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &stat);
+ if (ret)
goto out;
if (stat & DS1337_BIT_A1I) {
stat &= ~DS1337_BIT_A1I;
- i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, stat);
+ regmap_write(ds1307->regmap, DS1337_REG_STATUS, stat);
- control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
- if (control < 0)
+ ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
+ DS1337_BIT_A1IE, 0);
+ if (ret)
goto out;
- control &= ~DS1337_BIT_A1IE;
- i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control);
-
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
}
@@ -452,14 +350,14 @@ out:
static int ds1307_get_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
- int tmp;
+ int tmp, ret;
+ const struct chip_desc *chip = &chips[ds1307->type];
/* read the RTC date and time registers all at once */
- tmp = ds1307->read_block_data(ds1307->client,
- ds1307->offset, 7, ds1307->regs);
- if (tmp != 7) {
- dev_err(dev, "%s error %d\n", "read", tmp);
- return -EIO;
+ ret = regmap_bulk_read(ds1307->regmap, ds1307->offset, ds1307->regs, 7);
+ if (ret) {
+ dev_err(dev, "%s error %d\n", "read", ret);
+ return ret;
}
dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs);
@@ -481,22 +379,9 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_mon = bcd2bin(tmp) - 1;
t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
-#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
- switch (ds1307->type) {
- case ds_1337:
- case ds_1339:
- case ds_3231:
- if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY)
- t->tm_year += 100;
- break;
- case ds_1340:
- if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY)
- t->tm_year += 100;
- break;
- default:
- break;
- }
-#endif
+ if (ds1307->regs[chip->century_reg] & chip->century_bit &&
+ IS_ENABLED(CONFIG_RTC_DRV_DS1307_CENTURY))
+ t->tm_year += 100;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -511,6 +396,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
static int ds1307_set_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
+ const struct chip_desc *chip = &chips[ds1307->type];
int result;
int tmp;
u8 *buf = ds1307->regs;
@@ -521,24 +407,14 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
-#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
if (t->tm_year < 100)
return -EINVAL;
- switch (ds1307->type) {
- case ds_1337:
- case ds_1339:
- case ds_3231:
- case ds_1340:
- if (t->tm_year > 299)
- return -EINVAL;
- default:
- if (t->tm_year > 199)
- return -EINVAL;
- break;
- }
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ if (t->tm_year > (chip->century_bit ? 299 : 199))
+ return -EINVAL;
#else
- if (t->tm_year < 100 || t->tm_year > 199)
+ if (t->tm_year > 199)
return -EINVAL;
#endif
@@ -553,19 +429,12 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
tmp = t->tm_year - 100;
buf[DS1307_REG_YEAR] = bin2bcd(tmp);
- switch (ds1307->type) {
- case ds_1337:
- case ds_1339:
- case ds_3231:
- if (t->tm_year > 199)
- buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
- break;
- case ds_1340:
- buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN;
- if (t->tm_year > 199)
- buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY;
- break;
- case mcp794xx:
+ if (chip->century_enable_bit)
+ buf[chip->century_reg] |= chip->century_enable_bit;
+ if (t->tm_year > 199 && chip->century_bit)
+ buf[chip->century_reg] |= chip->century_bit;
+
+ if (ds1307->type == mcp794xx) {
/*
* these bits were cleared when preparing the date/time
* values and need to be set again before writing the
@@ -573,16 +442,12 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
*/
buf[DS1307_REG_SECS] |= MCP794XX_BIT_ST;
buf[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN;
- break;
- default:
- break;
}
dev_dbg(dev, "%s: %7ph\n", "write", buf);
- result = ds1307->write_block_data(ds1307->client,
- ds1307->offset, 7, buf);
- if (result < 0) {
+ result = regmap_bulk_write(ds1307->regmap, ds1307->offset, buf, 7);
+ if (result) {
dev_err(dev, "%s error %d\n", "write", result);
return result;
}
@@ -591,19 +456,18 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
int ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
/* read all ALARM1, ALARM2, and status registers at once */
- ret = ds1307->read_block_data(client,
- DS1339_REG_ALARM1_SECS, 9, ds1307->regs);
- if (ret != 9) {
+ ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS,
+ ds1307->regs, 9);
+ if (ret) {
dev_err(dev, "%s error %d\n", "alarm read", ret);
- return -EIO;
+ return ret;
}
dev_dbg(dev, "%s: %4ph, %3ph, %2ph\n", "alarm read",
@@ -633,8 +497,7 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char *buf = ds1307->regs;
u8 control, status;
int ret;
@@ -649,11 +512,10 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t->enabled, t->pending);
/* read current status of both alarms and the chip */
- ret = ds1307->read_block_data(client,
- DS1339_REG_ALARM1_SECS, 9, buf);
- if (ret != 9) {
+ ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, buf, 9);
+ if (ret) {
dev_err(dev, "%s error %d\n", "alarm write", ret);
- return -EIO;
+ return ret;
}
control = ds1307->regs[7];
status = ds1307->regs[8];
@@ -676,9 +538,8 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
buf[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE);
buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I);
- ret = ds1307->write_block_data(client,
- DS1339_REG_ALARM1_SECS, 9, buf);
- if (ret < 0) {
+ ret = regmap_bulk_write(ds1307->regmap, DS1339_REG_ALARM1_SECS, buf, 9);
+ if (ret) {
dev_err(dev, "can't set alarm time\n");
return ret;
}
@@ -687,7 +548,7 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (t->enabled) {
dev_dbg(dev, "alarm IRQ armed\n");
buf[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */
- i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, buf[7]);
+ regmap_write(ds1307->regmap, DS1337_REG_CONTROL, buf[7]);
}
return 0;
@@ -695,35 +556,181 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
- int ret;
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -ENOTTY;
- ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+ return regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
+ DS1337_BIT_A1IE,
+ enabled ? DS1337_BIT_A1IE : 0);
+}
+
+static const struct rtc_class_ops ds13xx_rtc_ops = {
+ .read_time = ds1307_get_time,
+ .set_time = ds1307_set_time,
+ .read_alarm = ds1337_read_alarm,
+ .set_alarm = ds1337_set_alarm,
+ .alarm_irq_enable = ds1307_alarm_irq_enable,
+};
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Alarm support for rx8130 devices.
+ */
+
+#define RX8130_REG_ALARM_MIN 0x07
+#define RX8130_REG_ALARM_HOUR 0x08
+#define RX8130_REG_ALARM_WEEK_OR_DAY 0x09
+#define RX8130_REG_EXTENSION 0x0c
+#define RX8130_REG_EXTENSION_WADA (1 << 3)
+#define RX8130_REG_FLAG 0x0d
+#define RX8130_REG_FLAG_AF (1 << 3)
+#define RX8130_REG_CONTROL0 0x0e
+#define RX8130_REG_CONTROL0_AIE (1 << 3)
+
+static irqreturn_t rx8130_irq(int irq, void *dev_id)
+{
+ struct ds1307 *ds1307 = dev_id;
+ struct mutex *lock = &ds1307->rtc->ops_lock;
+ u8 ctl[3];
+ int ret;
+
+ mutex_lock(lock);
+
+ /* Read control registers. */
+ ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3);
if (ret < 0)
- return ret;
+ goto out;
+ if (!(ctl[1] & RX8130_REG_FLAG_AF))
+ goto out;
+ ctl[1] &= ~RX8130_REG_FLAG_AF;
+ ctl[2] &= ~RX8130_REG_CONTROL0_AIE;
- if (enabled)
- ret |= DS1337_BIT_A1IE;
- else
- ret &= ~DS1337_BIT_A1IE;
+ ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3);
+ if (ret < 0)
+ goto out;
+
+ rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
+
+out:
+ mutex_unlock(lock);
+
+ return IRQ_HANDLED;
+}
- ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
+static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
+ u8 ald[3], ctl[3];
+ int ret;
+
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -EINVAL;
+
+ /* Read alarm registers. */
+ ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, 3);
if (ret < 0)
return ret;
+ /* Read control registers. */
+ ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3);
+ if (ret < 0)
+ return ret;
+
+ t->enabled = !!(ctl[2] & RX8130_REG_CONTROL0_AIE);
+ t->pending = !!(ctl[1] & RX8130_REG_FLAG_AF);
+
+ /* Report alarm 0 time assuming 24-hour and day-of-month modes. */
+ t->time.tm_sec = -1;
+ t->time.tm_min = bcd2bin(ald[0] & 0x7f);
+ t->time.tm_hour = bcd2bin(ald[1] & 0x7f);
+ t->time.tm_wday = -1;
+ t->time.tm_mday = bcd2bin(ald[2] & 0x7f);
+ t->time.tm_mon = -1;
+ t->time.tm_year = -1;
+ t->time.tm_yday = -1;
+ t->time.tm_isdst = -1;
+
+ dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d enabled=%d\n",
+ __func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
+ t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled);
+
return 0;
}
-static const struct rtc_class_ops ds13xx_rtc_ops = {
+static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
+ u8 ald[3], ctl[3];
+ int ret;
+
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -EINVAL;
+
+ dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
+ "enabled=%d pending=%d\n", __func__,
+ t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
+ t->time.tm_wday, t->time.tm_mday, t->time.tm_mon,
+ t->enabled, t->pending);
+
+ /* Read control registers. */
+ ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3);
+ if (ret < 0)
+ return ret;
+
+ ctl[0] &= ~RX8130_REG_EXTENSION_WADA;
+ ctl[1] |= RX8130_REG_FLAG_AF;
+ ctl[2] &= ~RX8130_REG_CONTROL0_AIE;
+
+ ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3);
+ if (ret < 0)
+ return ret;
+
+ /* Hardware alarm precision is 1 minute! */
+ ald[0] = bin2bcd(t->time.tm_min);
+ ald[1] = bin2bcd(t->time.tm_hour);
+ ald[2] = bin2bcd(t->time.tm_mday);
+
+ ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, 3);
+ if (ret < 0)
+ return ret;
+
+ if (!t->enabled)
+ return 0;
+
+ ctl[2] |= RX8130_REG_CONTROL0_AIE;
+
+ return regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3);
+}
+
+static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
+ int ret, reg;
+
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -EINVAL;
+
+ ret = regmap_read(ds1307->regmap, RX8130_REG_CONTROL0, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (enabled)
+ reg |= RX8130_REG_CONTROL0_AIE;
+ else
+ reg &= ~RX8130_REG_CONTROL0_AIE;
+
+ return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, reg);
+}
+
+static const struct rtc_class_ops rx8130_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
- .read_alarm = ds1337_read_alarm,
- .set_alarm = ds1337_set_alarm,
- .alarm_irq_enable = ds1307_alarm_irq_enable,
+ .read_alarm = rx8130_read_alarm,
+ .set_alarm = rx8130_set_alarm,
+ .alarm_irq_enable = rx8130_alarm_irq_enable,
};
/*----------------------------------------------------------------------*/
@@ -752,31 +759,27 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
static irqreturn_t mcp794xx_irq(int irq, void *dev_id)
{
- struct i2c_client *client = dev_id;
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct ds1307 *ds1307 = dev_id;
struct mutex *lock = &ds1307->rtc->ops_lock;
int reg, ret;
mutex_lock(lock);
/* Check and clear alarm 0 interrupt flag. */
- reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_ALARM0_CTRL);
- if (reg < 0)
+ ret = regmap_read(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, &reg);
+ if (ret)
goto out;
if (!(reg & MCP794XX_BIT_ALMX_IF))
goto out;
reg &= ~MCP794XX_BIT_ALMX_IF;
- ret = i2c_smbus_write_byte_data(client, MCP794XX_REG_ALARM0_CTRL, reg);
- if (ret < 0)
+ ret = regmap_write(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, reg);
+ if (ret)
goto out;
/* Disable alarm 0. */
- reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_CONTROL);
- if (reg < 0)
- goto out;
- reg &= ~MCP794XX_BIT_ALM0_EN;
- ret = i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, reg);
- if (ret < 0)
+ ret = regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL,
+ MCP794XX_BIT_ALM0_EN, 0);
+ if (ret)
goto out;
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
@@ -789,8 +792,7 @@ out:
static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 *regs = ds1307->regs;
int ret;
@@ -798,8 +800,8 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
return -EINVAL;
/* Read control and alarm 0 registers. */
- ret = ds1307->read_block_data(client, MCP794XX_REG_CONTROL, 10, regs);
- if (ret < 0)
+ ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10);
+ if (ret)
return ret;
t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN);
@@ -828,8 +830,7 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char *regs = ds1307->regs;
int ret;
@@ -843,8 +844,8 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t->enabled, t->pending);
/* Read control and alarm 0 registers. */
- ret = ds1307->read_block_data(client, MCP794XX_REG_CONTROL, 10, regs);
- if (ret < 0)
+ ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10);
+ if (ret)
return ret;
/* Set alarm 0, using 24-hour and day-of-month modes. */
@@ -862,35 +863,26 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
/* Disable interrupt. We will not enable until completely programmed */
regs[0] &= ~MCP794XX_BIT_ALM0_EN;
- ret = ds1307->write_block_data(client, MCP794XX_REG_CONTROL, 10, regs);
- if (ret < 0)
+ ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10);
+ if (ret)
return ret;
if (!t->enabled)
return 0;
regs[0] |= MCP794XX_BIT_ALM0_EN;
- return i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, regs[0]);
+ return regmap_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs[0]);
}
static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
- int reg;
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
- reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_CONTROL);
- if (reg < 0)
- return reg;
-
- if (enabled)
- reg |= MCP794XX_BIT_ALM0_EN;
- else
- reg &= ~MCP794XX_BIT_ALM0_EN;
-
- return i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, reg);
+ return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL,
+ MCP794XX_BIT_ALM0_EN,
+ enabled ? MCP794XX_BIT_ALM0_EN : 0);
}
static const struct rtc_class_ops mcp794xx_rtc_ops = {
@@ -903,50 +895,27 @@ static const struct rtc_class_ops mcp794xx_rtc_ops = {
/*----------------------------------------------------------------------*/
-static ssize_t
-ds1307_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1307_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
{
- struct i2c_client *client;
- struct ds1307 *ds1307;
- int result;
+ struct ds1307 *ds1307 = priv;
- client = kobj_to_i2c_client(kobj);
- ds1307 = i2c_get_clientdata(client);
-
- result = ds1307->read_block_data(client, ds1307->nvram_offset + off,
- count, buf);
- if (result < 0)
- dev_err(&client->dev, "%s error %d\n", "nvram read", result);
- return result;
+ return regmap_bulk_read(ds1307->regmap, ds1307->nvram_offset + offset,
+ val, bytes);
}
-static ssize_t
-ds1307_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1307_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
{
- struct i2c_client *client;
- struct ds1307 *ds1307;
- int result;
+ struct ds1307 *ds1307 = priv;
- client = kobj_to_i2c_client(kobj);
- ds1307 = i2c_get_clientdata(client);
-
- result = ds1307->write_block_data(client, ds1307->nvram_offset + off,
- count, buf);
- if (result < 0) {
- dev_err(&client->dev, "%s error %d\n", "nvram write", result);
- return result;
- }
- return count;
+ return regmap_bulk_write(ds1307->regmap, ds1307->nvram_offset + offset,
+ val, bytes);
}
-
/*----------------------------------------------------------------------*/
-static u8 do_trickle_setup_ds1339(struct i2c_client *client,
+static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307,
uint32_t ohms, bool diode)
{
u8 setup = (diode) ? DS1307_TRICKLE_CHARGER_DIODE :
@@ -963,14 +932,14 @@ static u8 do_trickle_setup_ds1339(struct i2c_client *client,
setup |= DS1307_TRICKLE_CHARGER_4K_OHM;
break;
default:
- dev_warn(&client->dev,
+ dev_warn(ds1307->dev,
"Unsupported ohm value %u in dt\n", ohms);
return 0;
}
return setup;
}
-static void ds1307_trickle_init(struct i2c_client *client,
+static void ds1307_trickle_init(struct ds1307 *ds1307,
struct chip_desc *chip)
{
uint32_t ohms = 0;
@@ -978,11 +947,12 @@ static void ds1307_trickle_init(struct i2c_client *client,
if (!chip->do_trickle_setup)
goto out;
- if (device_property_read_u32(&client->dev, "trickle-resistor-ohms", &ohms))
+ if (device_property_read_u32(ds1307->dev, "trickle-resistor-ohms",
+ &ohms))
goto out;
- if (device_property_read_bool(&client->dev, "trickle-diode-disable"))
+ if (device_property_read_bool(ds1307->dev, "trickle-diode-disable"))
diode = false;
- chip->trickle_charger_setup = chip->do_trickle_setup(client,
+ chip->trickle_charger_setup = chip->do_trickle_setup(ds1307,
ohms, diode);
out:
return;
@@ -1009,13 +979,10 @@ static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
s16 temp;
int ret;
- ret = ds1307->read_block_data(ds1307->client, DS3231_REG_TEMPERATURE,
- sizeof(temp_buf), temp_buf);
- if (ret < 0)
+ ret = regmap_bulk_read(ds1307->regmap, DS3231_REG_TEMPERATURE,
+ temp_buf, sizeof(temp_buf));
+ if (ret)
return ret;
- if (ret != sizeof(temp_buf))
- return -EIO;
-
/*
* Temperature is represented as a 10-bit code with a resolution of
* 0.25 degree celsius and encoded in two's complement format.
@@ -1055,12 +1022,11 @@ static void ds1307_hwmon_register(struct ds1307 *ds1307)
if (ds1307->type != ds_3231)
return;
- dev = devm_hwmon_device_register_with_groups(&ds1307->client->dev,
- ds1307->client->name,
+ dev = devm_hwmon_device_register_with_groups(ds1307->dev, ds1307->name,
ds1307, ds3231_hwmon_groups);
if (IS_ERR(dev)) {
- dev_warn(&ds1307->client->dev,
- "unable to register hwmon device %ld\n", PTR_ERR(dev));
+ dev_warn(ds1307->dev, "unable to register hwmon device %ld\n",
+ PTR_ERR(dev));
}
}
@@ -1099,24 +1065,12 @@ static int ds3231_clk_sqw_rates[] = {
static int ds1337_write_control(struct ds1307 *ds1307, u8 mask, u8 value)
{
- struct i2c_client *client = ds1307->client;
struct mutex *lock = &ds1307->rtc->ops_lock;
- int control;
int ret;
mutex_lock(lock);
-
- control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
- if (control < 0) {
- ret = control;
- goto out;
- }
-
- control &= ~mask;
- control |= value;
-
- ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control);
-out:
+ ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
+ mask, value);
mutex_unlock(lock);
return ret;
@@ -1126,12 +1080,12 @@ static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
- int control;
+ int control, ret;
int rate_sel = 0;
- control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL);
- if (control < 0)
- return control;
+ ret = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &control);
+ if (ret)
+ return ret;
if (control & DS1337_BIT_RS1)
rate_sel += 1;
if (control & DS1337_BIT_RS2)
@@ -1195,11 +1149,11 @@ static void ds3231_clk_sqw_unprepare(struct clk_hw *hw)
static int ds3231_clk_sqw_is_prepared(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
- int control;
+ int control, ret;
- control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL);
- if (control < 0)
- return control;
+ ret = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &control);
+ if (ret)
+ return ret;
return !(control & DS1337_BIT_INTCN);
}
@@ -1221,26 +1175,13 @@ static unsigned long ds3231_clk_32khz_recalc_rate(struct clk_hw *hw,
static int ds3231_clk_32khz_control(struct ds1307 *ds1307, bool enable)
{
- struct i2c_client *client = ds1307->client;
struct mutex *lock = &ds1307->rtc->ops_lock;
- int status;
int ret;
mutex_lock(lock);
-
- status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
- if (status < 0) {
- ret = status;
- goto out;
- }
-
- if (enable)
- status |= DS3231_BIT_EN32KHZ;
- else
- status &= ~DS3231_BIT_EN32KHZ;
-
- ret = i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, status);
-out:
+ ret = regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS,
+ DS3231_BIT_EN32KHZ,
+ enable ? DS3231_BIT_EN32KHZ : 0);
mutex_unlock(lock);
return ret;
@@ -1263,11 +1204,11 @@ static void ds3231_clk_32khz_unprepare(struct clk_hw *hw)
static int ds3231_clk_32khz_is_prepared(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
- int status;
+ int status, ret;
- status = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_STATUS);
- if (status < 0)
- return status;
+ ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &status);
+ if (ret)
+ return ret;
return !!(status & DS3231_BIT_EN32KHZ);
}
@@ -1292,18 +1233,17 @@ static struct clk_init_data ds3231_clks_init[] = {
static int ds3231_clks_register(struct ds1307 *ds1307)
{
- struct i2c_client *client = ds1307->client;
- struct device_node *node = client->dev.of_node;
+ struct device_node *node = ds1307->dev->of_node;
struct clk_onecell_data *onecell;
int i;
- onecell = devm_kzalloc(&client->dev, sizeof(*onecell), GFP_KERNEL);
+ onecell = devm_kzalloc(ds1307->dev, sizeof(*onecell), GFP_KERNEL);
if (!onecell)
return -ENOMEM;
onecell->clk_num = ARRAY_SIZE(ds3231_clks_init);
- onecell->clks = devm_kcalloc(&client->dev, onecell->clk_num,
- sizeof(onecell->clks[0]), GFP_KERNEL);
+ onecell->clks = devm_kcalloc(ds1307->dev, onecell->clk_num,
+ sizeof(onecell->clks[0]), GFP_KERNEL);
if (!onecell->clks)
return -ENOMEM;
@@ -1322,8 +1262,8 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
&init.name);
ds1307->clks[i].init = &init;
- onecell->clks[i] = devm_clk_register(&client->dev,
- &ds1307->clks[i]);
+ onecell->clks[i] = devm_clk_register(ds1307->dev,
+ &ds1307->clks[i]);
if (IS_ERR(onecell->clks[i]))
return PTR_ERR(onecell->clks[i]);
}
@@ -1345,8 +1285,8 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
ret = ds3231_clks_register(ds1307);
if (ret) {
- dev_warn(&ds1307->client->dev,
- "unable to register clock device %d\n", ret);
+ dev_warn(ds1307->dev, "unable to register clock device %d\n",
+ ret);
}
}
@@ -1358,6 +1298,12 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
#endif /* CONFIG_COMMON_CLK */
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x12,
+};
+
static int ds1307_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1365,7 +1311,6 @@ static int ds1307_probe(struct i2c_client *client,
int err = -ENODEV;
int tmp, wday;
struct chip_desc *chip;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
bool want_irq = false;
bool ds1307_can_wakeup_device = false;
unsigned char *buf;
@@ -1382,17 +1327,22 @@ static int ds1307_probe(struct i2c_client *client,
};
const struct rtc_class_ops *rtc_ops = &ds13xx_rtc_ops;
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)
- && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
- return -EIO;
-
ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
if (!ds1307)
return -ENOMEM;
- i2c_set_clientdata(client, ds1307);
+ dev_set_drvdata(&client->dev, ds1307);
+ ds1307->dev = &client->dev;
+ ds1307->name = client->name;
+ ds1307->irq = client->irq;
- ds1307->client = client;
+ ds1307->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(ds1307->regmap)) {
+ dev_err(ds1307->dev, "regmap allocation failed\n");
+ return PTR_ERR(ds1307->regmap);
+ }
+
+ i2c_set_clientdata(client, ds1307);
if (client->dev.of_node) {
ds1307->type = (enum ds_type)
@@ -1405,7 +1355,7 @@ static int ds1307_probe(struct i2c_client *client,
const struct acpi_device_id *acpi_id;
acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids),
- &client->dev);
+ ds1307->dev);
if (!acpi_id)
return -ENODEV;
chip = &chips[acpi_id->driver_data];
@@ -1413,27 +1363,21 @@ static int ds1307_probe(struct i2c_client *client,
}
if (!pdata)
- ds1307_trickle_init(client, chip);
+ ds1307_trickle_init(ds1307, chip);
else if (pdata->trickle_charger_setup)
chip->trickle_charger_setup = pdata->trickle_charger_setup;
if (chip->trickle_charger_setup && chip->trickle_charger_reg) {
- dev_dbg(&client->dev, "writing trickle charger info 0x%x to 0x%x\n",
+ dev_dbg(ds1307->dev,
+ "writing trickle charger info 0x%x to 0x%x\n",
DS13XX_TRICKLE_CHARGER_MAGIC | chip->trickle_charger_setup,
chip->trickle_charger_reg);
- i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+ regmap_write(ds1307->regmap, chip->trickle_charger_reg,
DS13XX_TRICKLE_CHARGER_MAGIC |
chip->trickle_charger_setup);
}
buf = ds1307->regs;
- if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
- ds1307->read_block_data = ds1307_native_smbus_read_block_data;
- ds1307->write_block_data = ds1307_native_smbus_write_block_data;
- } else {
- ds1307->read_block_data = ds1307_read_block_data;
- ds1307->write_block_data = ds1307_write_block_data;
- }
#ifdef CONFIG_OF
/*
@@ -1459,11 +1403,10 @@ static int ds1307_probe(struct i2c_client *client,
case ds_1339:
case ds_3231:
/* get registers that the "rtc" read below won't read... */
- tmp = ds1307->read_block_data(ds1307->client,
- DS1337_REG_CONTROL, 2, buf);
- if (tmp != 2) {
- dev_dbg(&client->dev, "read error %d\n", tmp);
- err = -EIO;
+ err = regmap_bulk_read(ds1307->regmap, DS1337_REG_CONTROL,
+ buf, 2);
+ if (err) {
+ dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
@@ -1477,8 +1420,8 @@ static int ds1307_probe(struct i2c_client *client,
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
- if (chip->alarm && (ds1307->client->irq > 0 ||
- ds1307_can_wakeup_device)) {
+ if (chip->alarm && (ds1307->irq > 0 ||
+ ds1307_can_wakeup_device)) {
ds1307->regs[0] |= DS1337_BIT_INTCN
| bbsqi_bitpos[ds1307->type];
ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
@@ -1486,50 +1429,49 @@ static int ds1307_probe(struct i2c_client *client,
want_irq = true;
}
- i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
- ds1307->regs[0]);
+ regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
+ ds1307->regs[0]);
/* oscillator fault? clear flag, and warn */
if (ds1307->regs[1] & DS1337_BIT_OSF) {
- i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
- ds1307->regs[1] & ~DS1337_BIT_OSF);
- dev_warn(&client->dev, "SET TIME!\n");
+ regmap_write(ds1307->regmap, DS1337_REG_STATUS,
+ ds1307->regs[1] & ~DS1337_BIT_OSF);
+ dev_warn(ds1307->dev, "SET TIME!\n");
}
break;
case rx_8025:
- tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
- RX8025_REG_CTRL1 << 4 | 0x08, 2, buf);
- if (tmp != 2) {
- dev_dbg(&client->dev, "read error %d\n", tmp);
- err = -EIO;
+ err = regmap_bulk_read(ds1307->regmap,
+ RX8025_REG_CTRL1 << 4 | 0x08, buf, 2);
+ if (err) {
+ dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
/* oscillator off? turn it on, so clock can tick. */
if (!(ds1307->regs[1] & RX8025_BIT_XST)) {
ds1307->regs[1] |= RX8025_BIT_XST;
- i2c_smbus_write_byte_data(client,
- RX8025_REG_CTRL2 << 4 | 0x08,
- ds1307->regs[1]);
- dev_warn(&client->dev,
+ regmap_write(ds1307->regmap,
+ RX8025_REG_CTRL2 << 4 | 0x08,
+ ds1307->regs[1]);
+ dev_warn(ds1307->dev,
"oscillator stop detected - SET TIME!\n");
}
if (ds1307->regs[1] & RX8025_BIT_PON) {
ds1307->regs[1] &= ~RX8025_BIT_PON;
- i2c_smbus_write_byte_data(client,
- RX8025_REG_CTRL2 << 4 | 0x08,
- ds1307->regs[1]);
- dev_warn(&client->dev, "power-on detected\n");
+ regmap_write(ds1307->regmap,
+ RX8025_REG_CTRL2 << 4 | 0x08,
+ ds1307->regs[1]);
+ dev_warn(ds1307->dev, "power-on detected\n");
}
if (ds1307->regs[1] & RX8025_BIT_VDET) {
ds1307->regs[1] &= ~RX8025_BIT_VDET;
- i2c_smbus_write_byte_data(client,
- RX8025_REG_CTRL2 << 4 | 0x08,
- ds1307->regs[1]);
- dev_warn(&client->dev, "voltage drop detected\n");
+ regmap_write(ds1307->regmap,
+ RX8025_REG_CTRL2 << 4 | 0x08,
+ ds1307->regs[1]);
+ dev_warn(ds1307->dev, "voltage drop detected\n");
}
/* make sure we are running in 24hour mode */
@@ -1537,16 +1479,15 @@ static int ds1307_probe(struct i2c_client *client,
u8 hour;
/* switch to 24 hour mode */
- i2c_smbus_write_byte_data(client,
- RX8025_REG_CTRL1 << 4 | 0x08,
- ds1307->regs[0] |
- RX8025_BIT_2412);
-
- tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
- RX8025_REG_CTRL1 << 4 | 0x08, 2, buf);
- if (tmp != 2) {
- dev_dbg(&client->dev, "read error %d\n", tmp);
- err = -EIO;
+ regmap_write(ds1307->regmap,
+ RX8025_REG_CTRL1 << 4 | 0x08,
+ ds1307->regs[0] | RX8025_BIT_2412);
+
+ err = regmap_bulk_read(ds1307->regmap,
+ RX8025_REG_CTRL1 << 4 | 0x08,
+ buf, 2);
+ if (err) {
+ dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
@@ -1557,9 +1498,16 @@ static int ds1307_probe(struct i2c_client *client,
if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
hour += 12;
- i2c_smbus_write_byte_data(client,
- DS1307_REG_HOUR << 4 | 0x08,
- hour);
+ regmap_write(ds1307->regmap,
+ DS1307_REG_HOUR << 4 | 0x08, hour);
+ }
+ break;
+ case rx_8130:
+ ds1307->offset = 0x10; /* Seconds starts at 0x10 */
+ rtc_ops = &rx8130_rtc_ops;
+ if (chip->alarm && ds1307->irq > 0) {
+ irq_handler = rx8130_irq;
+ want_irq = true;
}
break;
case ds_1388:
@@ -1567,7 +1515,8 @@ static int ds1307_probe(struct i2c_client *client,
break;
case mcp794xx:
rtc_ops = &mcp794xx_rtc_ops;
- if (ds1307->client->irq > 0 && chip->alarm) {
+ if (chip->alarm && (ds1307->irq > 0 ||
+ ds1307_can_wakeup_device)) {
irq_handler = mcp794xx_irq;
want_irq = true;
}
@@ -1578,10 +1527,9 @@ static int ds1307_probe(struct i2c_client *client,
read_rtc:
/* read RTC registers */
- tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
- if (tmp != 8) {
- dev_dbg(&client->dev, "read error %d\n", tmp);
- err = -EIO;
+ err = regmap_bulk_read(ds1307->regmap, ds1307->offset, buf, 8);
+ if (err) {
+ dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
@@ -1597,56 +1545,56 @@ read_rtc:
case m41t00:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1307_BIT_CH) {
- i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
- dev_warn(&client->dev, "SET TIME!\n");
+ regmap_write(ds1307->regmap, DS1307_REG_SECS, 0);
+ dev_warn(ds1307->dev, "SET TIME!\n");
goto read_rtc;
}
break;
+ case ds_1308:
case ds_1338:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1307_BIT_CH)
- i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+ regmap_write(ds1307->regmap, DS1307_REG_SECS, 0);
/* oscillator fault? clear flag, and warn */
if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
- i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
- ds1307->regs[DS1307_REG_CONTROL]
- & ~DS1338_BIT_OSF);
- dev_warn(&client->dev, "SET TIME!\n");
+ regmap_write(ds1307->regmap, DS1307_REG_CONTROL,
+ ds1307->regs[DS1307_REG_CONTROL] &
+ ~DS1338_BIT_OSF);
+ dev_warn(ds1307->dev, "SET TIME!\n");
goto read_rtc;
}
break;
case ds_1340:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1340_BIT_nEOSC)
- i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+ regmap_write(ds1307->regmap, DS1307_REG_SECS, 0);
- tmp = i2c_smbus_read_byte_data(client, DS1340_REG_FLAG);
- if (tmp < 0) {
- dev_dbg(&client->dev, "read error %d\n", tmp);
- err = -EIO;
+ err = regmap_read(ds1307->regmap, DS1340_REG_FLAG, &tmp);
+ if (err) {
+ dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
/* oscillator fault? clear flag, and warn */
if (tmp & DS1340_BIT_OSF) {
- i2c_smbus_write_byte_data(client, DS1340_REG_FLAG, 0);
- dev_warn(&client->dev, "SET TIME!\n");
+ regmap_write(ds1307->regmap, DS1340_REG_FLAG, 0);
+ dev_warn(ds1307->dev, "SET TIME!\n");
}
break;
case mcp794xx:
/* make sure that the backup battery is enabled */
if (!(ds1307->regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) {
- i2c_smbus_write_byte_data(client, DS1307_REG_WDAY,
- ds1307->regs[DS1307_REG_WDAY]
- | MCP794XX_BIT_VBATEN);
+ regmap_write(ds1307->regmap, DS1307_REG_WDAY,
+ ds1307->regs[DS1307_REG_WDAY] |
+ MCP794XX_BIT_VBATEN);
}
/* clock halted? turn it on, so clock can tick. */
if (!(tmp & MCP794XX_BIT_ST)) {
- i2c_smbus_write_byte_data(client, DS1307_REG_SECS,
- MCP794XX_BIT_ST);
- dev_warn(&client->dev, "SET TIME!\n");
+ regmap_write(ds1307->regmap, DS1307_REG_SECS,
+ MCP794XX_BIT_ST);
+ dev_warn(ds1307->dev, "SET TIME!\n");
goto read_rtc;
}
@@ -1680,16 +1628,15 @@ read_rtc:
tmp = 0;
if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
tmp += 12;
- i2c_smbus_write_byte_data(client,
- ds1307->offset + DS1307_REG_HOUR,
- bin2bcd(tmp));
+ regmap_write(ds1307->regmap, ds1307->offset + DS1307_REG_HOUR,
+ bin2bcd(tmp));
}
/*
* Some IPs have weekday reset value = 0x1 which might not be correct;
* hence compute the wday using the current date/month/year values
*/
- ds1307_get_time(&client->dev, &tm);
+ ds1307_get_time(ds1307->dev, &tm);
wday = tm.tm_wday;
timestamp = rtc_tm_to_time64(&tm);
rtc_time64_to_tm(timestamp, &tm);
@@ -1699,78 +1646,63 @@ read_rtc:
* If different then set the wday which we computed using
* timestamp
*/
- if (wday != tm.tm_wday) {
- wday = i2c_smbus_read_byte_data(client, MCP794XX_REG_WEEKDAY);
- wday = wday & ~MCP794XX_REG_WEEKDAY_WDAY_MASK;
- wday = wday | (tm.tm_wday + 1);
- i2c_smbus_write_byte_data(client, MCP794XX_REG_WEEKDAY, wday);
- }
+ if (wday != tm.tm_wday)
+ regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY,
+ MCP794XX_REG_WEEKDAY_WDAY_MASK,
+ tm.tm_wday + 1);
if (want_irq) {
- device_set_wakeup_capable(&client->dev, true);
+ device_set_wakeup_capable(ds1307->dev, true);
set_bit(HAS_ALARM, &ds1307->flags);
}
- ds1307->rtc = devm_rtc_device_register(&client->dev, client->name,
- rtc_ops, THIS_MODULE);
+
+ ds1307->rtc = devm_rtc_allocate_device(ds1307->dev);
if (IS_ERR(ds1307->rtc)) {
return PTR_ERR(ds1307->rtc);
}
- if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
+ if (ds1307_can_wakeup_device && ds1307->irq <= 0) {
/* Disable request for an IRQ */
want_irq = false;
- dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
+ dev_info(ds1307->dev,
+ "'wakeup-source' is set, request for an IRQ is disabled!\n");
/* We cannot support UIE mode if we do not have an IRQ line */
ds1307->rtc->uie_unsupported = 1;
}
if (want_irq) {
- err = devm_request_threaded_irq(&client->dev,
- client->irq, NULL, irq_handler,
+ err = devm_request_threaded_irq(ds1307->dev,
+ ds1307->irq, NULL, irq_handler,
IRQF_SHARED | IRQF_ONESHOT,
- ds1307->rtc->name, client);
+ ds1307->name, ds1307);
if (err) {
client->irq = 0;
- device_set_wakeup_capable(&client->dev, false);
+ device_set_wakeup_capable(ds1307->dev, false);
clear_bit(HAS_ALARM, &ds1307->flags);
- dev_err(&client->dev, "unable to request IRQ!\n");
+ dev_err(ds1307->dev, "unable to request IRQ!\n");
} else
- dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+ dev_dbg(ds1307->dev, "got IRQ %d\n", client->irq);
}
if (chip->nvram_size) {
-
- ds1307->nvram = devm_kzalloc(&client->dev,
- sizeof(struct bin_attribute),
- GFP_KERNEL);
- if (!ds1307->nvram) {
- dev_err(&client->dev, "cannot allocate memory for nvram sysfs\n");
- } else {
-
- ds1307->nvram->attr.name = "nvram";
- ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
-
- sysfs_bin_attr_init(ds1307->nvram);
-
- ds1307->nvram->read = ds1307_nvram_read;
- ds1307->nvram->write = ds1307_nvram_write;
- ds1307->nvram->size = chip->nvram_size;
- ds1307->nvram_offset = chip->nvram_offset;
-
- err = sysfs_create_bin_file(&client->dev.kobj,
- ds1307->nvram);
- if (err) {
- dev_err(&client->dev,
- "unable to create sysfs file: %s\n",
- ds1307->nvram->attr.name);
- } else {
- set_bit(HAS_NVRAM, &ds1307->flags);
- dev_info(&client->dev, "%zu bytes nvram\n",
- ds1307->nvram->size);
- }
- }
+ ds1307->nvmem_cfg.name = "ds1307_nvram";
+ ds1307->nvmem_cfg.word_size = 1;
+ ds1307->nvmem_cfg.stride = 1;
+ ds1307->nvmem_cfg.size = chip->nvram_size;
+ ds1307->nvmem_cfg.reg_read = ds1307_nvram_read;
+ ds1307->nvmem_cfg.reg_write = ds1307_nvram_write;
+ ds1307->nvmem_cfg.priv = ds1307;
+ ds1307->nvram_offset = chip->nvram_offset;
+
+ ds1307->rtc->nvmem_config = &ds1307->nvmem_cfg;
+ ds1307->rtc->nvram_old_abi = true;
}
+ ds1307->rtc->ops = rtc_ops;
+ err = rtc_register_device(ds1307->rtc);
+ if (err)
+ return err;
+
ds1307_hwmon_register(ds1307);
ds1307_clks_register(ds1307);
@@ -1780,16 +1712,6 @@ exit:
return err;
}
-static int ds1307_remove(struct i2c_client *client)
-{
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
-
- if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
- sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
-
- return 0;
-}
-
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
@@ -1797,7 +1719,6 @@ static struct i2c_driver ds1307_driver = {
.acpi_match_table = ACPI_PTR(ds1307_acpi_ids),
},
.probe = ds1307_probe,
- .remove = ds1307_remove,
.id_table = ds1307_id,
};
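
Most of the ds1307 diff above is a mechanical conversion from the i2c_smbus_*() accessors to regmap, and the error-handling changes follow from the two APIs' differing conventions. An illustrative fragment (register names from the patch, surrounding variables hypothetical):

	int val, err;

	/* before: the value and a negative errno share one return */
	val = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
	if (val < 0)
		return val;

	/* after: the return is 0 or -errno, the value arrives by pointer */
	err = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &val);
	if (err)
		return err;

	/* read-modify-write sequences collapse into one call */
	err = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
				 DS1337_BIT_A1IE, 0);

The hand-rolled block read/write helpers (and their compare-and-retry loops) are dropped outright; regmap_bulk_read()/regmap_bulk_write() take over the multi-byte transfers.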
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index deff431a37c4..0550f7ba464f 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -22,6 +22,7 @@
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/regmap.h>
+#include <linux/hwmon.h>
#define DS3232_REG_SECONDS 0x00
#define DS3232_REG_MINUTES 0x01
@@ -46,6 +47,8 @@
# define DS3232_REG_SR_A2F 0x02
# define DS3232_REG_SR_A1F 0x01
+#define DS3232_REG_TEMPERATURE 0x11
+
struct ds3232 {
struct device *dev;
struct regmap *regmap;
@@ -275,6 +278,120 @@ static int ds3232_update_alarm(struct device *dev, unsigned int enabled)
return ret;
}
+/*
+ * Temperature sensor support for ds3232/ds3234 devices.
+ * A user-initiated temperature conversion is not started by this function,
+ * so the temperature is updated once every 64 seconds.
+ */
+static int ds3232_hwmon_read_temp(struct device *dev, long int *mC)
+{
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
+ u8 temp_buf[2];
+ s16 temp;
+ int ret;
+
+ ret = regmap_bulk_read(ds3232->regmap, DS3232_REG_TEMPERATURE, temp_buf,
+ sizeof(temp_buf));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Temperature is represented as a 10-bit code with a resolution of
+ * 0.25 degrees Celsius and encoded in two's complement format.
+ */
+ temp = (temp_buf[0] << 8) | temp_buf[1];
+ temp >>= 6;
+ *mC = temp * 250;
+
+ return 0;
+}
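/*
 * A worked example of the decoding above (illustrative, not part of the
 * patch).  The two registers hold a left-justified 10-bit two's-complement
 * value; like the driver, this relies on arithmetic right shift of a
 * signed type.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t temp_buf[2] = { 0xE6, 0x80 };	/* sample register readout */
	int16_t temp = (int16_t)((temp_buf[0] << 8) | temp_buf[1]);
	long mC;

	temp >>= 6;		/* drop the 6 unused low bits, keep the sign */
	mC = temp * 250L;	/* 1 LSB == 0.25 C == 250 milli-degrees */

	printf("%ld milli-degrees C\n", mC);	/* prints -25500 */
	return 0;
}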
+
+static umode_t ds3232_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int ds3232_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = ds3232_hwmon_read_temp(dev, temp);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static u32 ds3232_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info ds3232_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = ds3232_hwmon_chip_config,
+};
+
+static u32 ds3232_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info ds3232_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = ds3232_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *ds3232_hwmon_info[] = {
+ &ds3232_hwmon_chip,
+ &ds3232_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops ds3232_hwmon_hwmon_ops = {
+ .is_visible = ds3232_hwmon_is_visible,
+ .read = ds3232_hwmon_read,
+};
+
+static const struct hwmon_chip_info ds3232_hwmon_chip_info = {
+ .ops = &ds3232_hwmon_hwmon_ops,
+ .info = ds3232_hwmon_info,
+};
+
+static void ds3232_hwmon_register(struct device *dev, const char *name)
+{
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
+ struct device *hwmon_dev;
+
+ if (!IS_ENABLED(CONFIG_RTC_DRV_DS3232_HWMON))
+ return;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, name, ds3232,
+ &ds3232_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "unable to register hwmon device %ld\n",
+ PTR_ERR(hwmon_dev));
+ }
+}
+
static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds3232 *ds3232 = dev_get_drvdata(dev);
@@ -366,6 +483,8 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
if (ds3232->irq > 0)
device_init_wakeup(dev, 1);
+ ds3232_hwmon_register(dev, name);
+
ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops,
THIS_MODULE);
if (IS_ERR(ds3232->rtc))
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-ftrtc010.c
index 5279390bb42d..af8d6beae20c 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -1,5 +1,5 @@
/*
- * Gemini OnChip RTC
+ * Faraday Technology FTRTC010 driver
*
* Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
*
@@ -26,33 +26,36 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/clk.h>
-#define DRV_NAME "rtc-gemini"
+#define DRV_NAME "rtc-ftrtc010"
MODULE_AUTHOR("Hans Ulli Kroll <ulli.kroll@googlemail.com>");
MODULE_DESCRIPTION("RTC driver for Gemini SoC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
-struct gemini_rtc {
+struct ftrtc010_rtc {
struct rtc_device *rtc_dev;
void __iomem *rtc_base;
int rtc_irq;
+ struct clk *pclk;
+ struct clk *extclk;
};
-enum gemini_rtc_offsets {
- GEMINI_RTC_SECOND = 0x00,
- GEMINI_RTC_MINUTE = 0x04,
- GEMINI_RTC_HOUR = 0x08,
- GEMINI_RTC_DAYS = 0x0C,
- GEMINI_RTC_ALARM_SECOND = 0x10,
- GEMINI_RTC_ALARM_MINUTE = 0x14,
- GEMINI_RTC_ALARM_HOUR = 0x18,
- GEMINI_RTC_RECORD = 0x1C,
- GEMINI_RTC_CR = 0x20
+enum ftrtc010_rtc_offsets {
+ FTRTC010_RTC_SECOND = 0x00,
+ FTRTC010_RTC_MINUTE = 0x04,
+ FTRTC010_RTC_HOUR = 0x08,
+ FTRTC010_RTC_DAYS = 0x0C,
+ FTRTC010_RTC_ALARM_SECOND = 0x10,
+ FTRTC010_RTC_ALARM_MINUTE = 0x14,
+ FTRTC010_RTC_ALARM_HOUR = 0x18,
+ FTRTC010_RTC_RECORD = 0x1C,
+ FTRTC010_RTC_CR = 0x20,
};
-static irqreturn_t gemini_rtc_interrupt(int irq, void *dev)
+static irqreturn_t ftrtc010_rtc_interrupt(int irq, void *dev)
{
return IRQ_HANDLED;
}
@@ -66,18 +69,18 @@ static irqreturn_t gemini_rtc_interrupt(int irq, void *dev)
* the same thing, without the rtc-lib.c calls.
*/
-static int gemini_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int ftrtc010_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct gemini_rtc *rtc = dev_get_drvdata(dev);
+ struct ftrtc010_rtc *rtc = dev_get_drvdata(dev);
unsigned int days, hour, min, sec;
unsigned long offset, time;
- sec = readl(rtc->rtc_base + GEMINI_RTC_SECOND);
- min = readl(rtc->rtc_base + GEMINI_RTC_MINUTE);
- hour = readl(rtc->rtc_base + GEMINI_RTC_HOUR);
- days = readl(rtc->rtc_base + GEMINI_RTC_DAYS);
- offset = readl(rtc->rtc_base + GEMINI_RTC_RECORD);
+ sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
+ min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
+ hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
+ days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
+ offset = readl(rtc->rtc_base + FTRTC010_RTC_RECORD);
time = offset + days * 86400 + hour * 3600 + min * 60 + sec;
@@ -86,9 +89,9 @@ static int gemini_rtc_read_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int ftrtc010_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct gemini_rtc *rtc = dev_get_drvdata(dev);
+ struct ftrtc010_rtc *rtc = dev_get_drvdata(dev);
unsigned int sec, min, hour, day;
unsigned long offset, time;
@@ -97,27 +100,27 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_tm_to_time(tm, &time);
- sec = readl(rtc->rtc_base + GEMINI_RTC_SECOND);
- min = readl(rtc->rtc_base + GEMINI_RTC_MINUTE);
- hour = readl(rtc->rtc_base + GEMINI_RTC_HOUR);
- day = readl(rtc->rtc_base + GEMINI_RTC_DAYS);
+ sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
+ min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
+ hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
+ day = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
offset = time - (day * 86400 + hour * 3600 + min * 60 + sec);
- writel(offset, rtc->rtc_base + GEMINI_RTC_RECORD);
- writel(0x01, rtc->rtc_base + GEMINI_RTC_CR);
+ writel(offset, rtc->rtc_base + FTRTC010_RTC_RECORD);
+ writel(0x01, rtc->rtc_base + FTRTC010_RTC_CR);
return 0;
}
-static const struct rtc_class_ops gemini_rtc_ops = {
- .read_time = gemini_rtc_read_time,
- .set_time = gemini_rtc_set_time,
+static const struct rtc_class_ops ftrtc010_rtc_ops = {
+ .read_time = ftrtc010_rtc_read_time,
+ .set_time = ftrtc010_rtc_set_time,
};
-static int gemini_rtc_probe(struct platform_device *pdev)
+static int ftrtc010_rtc_probe(struct platform_device *pdev)
{
- struct gemini_rtc *rtc;
+ struct ftrtc010_rtc *rtc;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
@@ -127,6 +130,27 @@ static int gemini_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, rtc);
+ rtc->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(rtc->pclk)) {
+ dev_err(dev, "could not get PCLK\n");
+ } else {
+ ret = clk_prepare_enable(rtc->pclk);
+ if (ret) {
+ dev_err(dev, "failed to enable PCLK\n");
+ return ret;
+ }
+ }
+ rtc->extclk = devm_clk_get(dev, "EXTCLK");
+ if (IS_ERR(rtc->extclk)) {
+ dev_err(dev, "could not get EXTCLK\n");
+ } else {
+ ret = clk_prepare_enable(rtc->extclk);
+ if (ret) {
+ dev_err(dev, "failed to enable EXTCLK\n");
+ return ret;
+ }
+ }
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res)
return -ENODEV;
@@ -142,38 +166,43 @@ static int gemini_rtc_probe(struct platform_device *pdev)
if (!rtc->rtc_base)
return -ENOMEM;
- ret = devm_request_irq(dev, rtc->rtc_irq, gemini_rtc_interrupt,
+ ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
return ret;
rtc->rtc_dev = rtc_device_register(pdev->name, dev,
- &gemini_rtc_ops, THIS_MODULE);
+ &ftrtc010_rtc_ops, THIS_MODULE);
return PTR_ERR_OR_ZERO(rtc->rtc_dev);
}
-static int gemini_rtc_remove(struct platform_device *pdev)
+static int ftrtc010_rtc_remove(struct platform_device *pdev)
{
- struct gemini_rtc *rtc = platform_get_drvdata(pdev);
+ struct ftrtc010_rtc *rtc = platform_get_drvdata(pdev);
+ if (!IS_ERR(rtc->extclk))
+ clk_disable_unprepare(rtc->extclk);
+ if (!IS_ERR(rtc->pclk))
+ clk_disable_unprepare(rtc->pclk);
rtc_device_unregister(rtc->rtc_dev);
return 0;
}
-static const struct of_device_id gemini_rtc_dt_match[] = {
+static const struct of_device_id ftrtc010_rtc_dt_match[] = {
{ .compatible = "cortina,gemini-rtc" },
+ { .compatible = "faraday,ftrtc010" },
{ }
};
-MODULE_DEVICE_TABLE(of, gemini_rtc_dt_match);
+MODULE_DEVICE_TABLE(of, ftrtc010_rtc_dt_match);
-static struct platform_driver gemini_rtc_driver = {
+static struct platform_driver ftrtc010_rtc_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = gemini_rtc_dt_match,
+ .of_match_table = ftrtc010_rtc_dt_match,
},
- .probe = gemini_rtc_probe,
- .remove = gemini_rtc_remove,
+ .probe = ftrtc010_rtc_probe,
+ .remove = ftrtc010_rtc_remove,
};
-module_platform_driver_probe(gemini_rtc_driver, gemini_rtc_probe);
+module_platform_driver_probe(ftrtc010_rtc_driver, ftrtc010_rtc_probe);
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5ec4653022ff..8940e9e43ea0 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bcd.h>
+#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -53,6 +54,8 @@
#define M41T80_ALARM_REG_SIZE \
(M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
+#define M41T80_SQW_MAX_FREQ 32768
+
#define M41T80_SEC_ST BIT(7) /* ST: Stop Bit */
#define M41T80_ALMON_AFE BIT(7) /* AFE: AF Enable Bit */
#define M41T80_ALMON_SQWE BIT(6) /* SQWE: SQW Enable Bit */
@@ -147,7 +150,11 @@ MODULE_DEVICE_TABLE(of, m41t80_of_match);
struct m41t80_data {
unsigned long features;
+ struct i2c_client *client;
struct rtc_device *rtc;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw sqw;
+#endif
};
static irqreturn_t m41t80_handle_irq(int irq, void *dev_id)
@@ -227,6 +234,7 @@ static int m41t80_get_datetime(struct i2c_client *client,
/* Sets the given date and time to the real time clock. */
static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
+ struct m41t80_data *clientdata = i2c_get_clientdata(client);
unsigned char buf[8];
int err, flags;
@@ -242,6 +250,17 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100);
buf[M41T80_REG_WDAY] = tm->tm_wday;
+ /* If the square wave output is controlled in the weekday register */
+ if (clientdata->features & M41T80_FEATURE_SQ_ALT) {
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, M41T80_REG_WDAY);
+ if (val < 0)
+ return val;
+
+ buf[M41T80_REG_WDAY] |= (val & 0xf0);
+ }
+
err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC,
sizeof(buf), buf);
if (err < 0) {
@@ -332,6 +351,9 @@ static int m41t80_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return err;
}
+ /* Keep SQWE bit value */
+ alarmvals[0] |= (ret & M41T80_ALMON_SQWE);
+
ret = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (ret < 0)
return ret;
@@ -431,103 +453,175 @@ static ssize_t flags_show(struct device *dev,
}
static DEVICE_ATTR_RO(flags);
-static ssize_t sqwfreq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static struct attribute *attrs[] = {
+ &dev_attr_flags.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+#ifdef CONFIG_COMMON_CLK
+#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
+
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct m41t80_data *clientdata = i2c_get_clientdata(client);
- int val, reg_sqw;
+ struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
+ struct i2c_client *client = m41t80->client;
+ int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
+ M41T80_REG_WDAY : M41T80_REG_SQW;
+ int ret = i2c_smbus_read_byte_data(client, reg_sqw);
+ unsigned long val = M41T80_SQW_MAX_FREQ;
- if (!(clientdata->features & M41T80_FEATURE_SQ))
- return -EINVAL;
+ if (ret < 0)
+ return 0;
- reg_sqw = M41T80_REG_SQW;
- if (clientdata->features & M41T80_FEATURE_SQ_ALT)
- reg_sqw = M41T80_REG_WDAY;
- val = i2c_smbus_read_byte_data(client, reg_sqw);
- if (val < 0)
- return val;
- val = (val >> 4) & 0xf;
- switch (val) {
- case 0:
- break;
- case 1:
- val = 32768;
- break;
- default:
- val = 32768 >> val;
- }
- return sprintf(buf, "%d\n", val);
+ ret >>= 4;
+ if (ret == 0)
+ val = 0;
+ else if (ret > 1)
+ val = val / (1 << ret);
+
+ return val;
}
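/*
 * The RS[3:0] nibble read above maps to output frequencies as follows
 * (a standalone illustration, not part of the patch):
 */
#include <stdio.h>

int main(void)
{
	const unsigned long max = 32768;	/* M41T80_SQW_MAX_FREQ */
	int rs;

	for (rs = 0; rs <= 15; rs++) {
		unsigned long hz;

		if (rs == 0)
			hz = 0;			/* square wave disabled */
		else if (rs == 1)
			hz = max;		/* full 32768 Hz */
		else
			hz = max / (1UL << rs);	/* 8192, 4096, ... 1 Hz */

		printf("RS=%2d -> %5lu Hz\n", rs, hz);
	}
	return 0;
}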
-static ssize_t sqwfreq_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct m41t80_data *clientdata = i2c_get_clientdata(client);
- int almon, sqw, reg_sqw, rc;
- unsigned long val;
+ int i, freq = M41T80_SQW_MAX_FREQ;
- rc = kstrtoul(buf, 0, &val);
- if (rc < 0)
- return rc;
+ if (freq <= rate)
+ return freq;
- if (!(clientdata->features & M41T80_FEATURE_SQ))
- return -EINVAL;
+ for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
+ freq /= 1 << i;
+ if (freq <= rate)
+ return freq;
+ }
- if (val) {
- if (!is_power_of_2(val))
+ return 0;
+}
+
+static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
+ struct i2c_client *client = m41t80->client;
+ int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
+ M41T80_REG_WDAY : M41T80_REG_SQW;
+ int reg, ret, val = 0;
+
+ if (rate) {
+ if (!is_power_of_2(rate))
return -EINVAL;
- val = ilog2(val);
- if (val == 15)
+ val = ilog2(rate);
+ if (val == ilog2(M41T80_SQW_MAX_FREQ))
val = 1;
- else if (val < 14)
- val = 15 - val;
+ else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
+ val = ilog2(M41T80_SQW_MAX_FREQ) - val;
else
return -EINVAL;
}
- /* disable SQW, set SQW frequency & re-enable */
- almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
- if (almon < 0)
- return almon;
- reg_sqw = M41T80_REG_SQW;
- if (clientdata->features & M41T80_FEATURE_SQ_ALT)
- reg_sqw = M41T80_REG_WDAY;
- sqw = i2c_smbus_read_byte_data(client, reg_sqw);
- if (sqw < 0)
- return sqw;
- sqw = (sqw & 0x0f) | (val << 4);
-
- rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
- almon & ~M41T80_ALMON_SQWE);
- if (rc < 0)
- return rc;
- if (val) {
- rc = i2c_smbus_write_byte_data(client, reg_sqw, sqw);
- if (rc < 0)
- return rc;
+ reg = i2c_smbus_read_byte_data(client, reg_sqw);
+ if (reg < 0)
+ return reg;
- rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
- almon | M41T80_ALMON_SQWE);
- if (rc < 0)
- return rc;
- }
- return count;
+ reg = (reg & 0x0f) | (val << 4);
+
+ ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
-static DEVICE_ATTR_RW(sqwfreq);
-static struct attribute *attrs[] = {
- &dev_attr_flags.attr,
- &dev_attr_sqwfreq.attr,
- NULL,
-};
+static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
+{
+ struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
+ struct i2c_client *client = m41t80->client;
+ int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-static struct attribute_group attr_group = {
- .attrs = attrs,
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ ret |= M41T80_ALMON_SQWE;
+ else
+ ret &= ~M41T80_ALMON_SQWE;
+
+ return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+}
+
+static int m41t80_sqw_prepare(struct clk_hw *hw)
+{
+ return m41t80_sqw_control(hw, 1);
+}
+
+static void m41t80_sqw_unprepare(struct clk_hw *hw)
+{
+ m41t80_sqw_control(hw, 0);
+}
+
+static int m41t80_sqw_is_prepared(struct clk_hw *hw)
+{
+ struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
+ struct i2c_client *client = m41t80->client;
+ int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+
+ if (ret < 0)
+ return ret;
+
+ return !!(ret & M41T80_ALMON_SQWE);
+}
+
+static const struct clk_ops m41t80_sqw_ops = {
+ .prepare = m41t80_sqw_prepare,
+ .unprepare = m41t80_sqw_unprepare,
+ .is_prepared = m41t80_sqw_is_prepared,
+ .recalc_rate = m41t80_sqw_recalc_rate,
+ .round_rate = m41t80_sqw_round_rate,
+ .set_rate = m41t80_sqw_set_rate,
};
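/*
 * A hypothetical consumer of the SQW clock registered below (a sketch,
 * assuming a devicetree node wires it up through the usual clocks
 * binding; nothing here is in the patch itself):
 *
 *	struct clk *sqw = devm_clk_get(dev, NULL);
 *
 *	if (!IS_ERR(sqw)) {
 *		clk_set_rate(sqw, 4096);	/* set_rate picks RS=3 */
 *		clk_prepare_enable(sqw);	/* prepare sets ALMON_SQWE */
 *	}
 */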
+static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
+{
+ struct i2c_client *client = m41t80->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+ int ret;
+
+ /* First disable the clock */
+ ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ ret & ~(M41T80_ALMON_SQWE));
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ init.name = "m41t80-sqw";
+ init.ops = &m41t80_sqw_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ m41t80->sqw.init = &init;
+
+ /* optional override of the clock name */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = clk_register(&client->dev, &m41t80->sqw);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
#ifdef CONFIG_RTC_DRV_M41T80_WDT
/*
*****************************************************************************
@@ -845,6 +939,7 @@ static int m41t80_probe(struct i2c_client *client,
if (!m41t80_data)
return -ENOMEM;
+ m41t80_data->client = client;
if (client->dev.of_node)
m41t80_data->features = (unsigned long)
of_device_get_match_data(&client->dev);
@@ -937,6 +1032,10 @@ static int m41t80_probe(struct i2c_client *client,
}
}
#endif
+#ifdef CONFIG_COMMON_CLK
+ if (m41t80_data->features & M41T80_FEATURE_SQ)
+ m41t80_sqw_register_clk(m41t80_data);
+#endif
return 0;
}
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 77319122642a..401f46d8f21b 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -43,17 +43,6 @@
#define MAX_PIE_NUM 9
#define MAX_PIE_FREQ 512
-static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = {
- { 2, RTC_2HZ_BIT },
- { 4, RTC_SAM0_BIT },
- { 8, RTC_SAM1_BIT },
- { 16, RTC_SAM2_BIT },
- { 32, RTC_SAM3_BIT },
- { 64, RTC_SAM4_BIT },
- { 128, RTC_SAM5_BIT },
- { 256, RTC_SAM6_BIT },
- { MAX_PIE_FREQ, RTC_SAM7_BIT },
-};
#define MXC_RTC_TIME 0
#define MXC_RTC_ALARM 1
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index b1b6b3041bfb..4ed81117cf5f 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -93,7 +93,7 @@ static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
__raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER);
while (!(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout)
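/*
 * Why the pre-decrement above matters (standalone sketch, not in the
 * patch): with the old post-decrement, a full timeout leaves the counter
 * at -1, so the "if (!timeout)" expiry check never fires.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ready(void)
{
	return false;	/* model hardware that never becomes ready */
}

int main(void)
{
	int timeout = 3;

	while (!ready() && --timeout)
		;	/* mdelay(1) in the driver */

	if (!timeout)
		printf("timed out\n");	/* reached: timeout == 0 */

	/* with "timeout--" the loop would exit with timeout == -1 */
	return 0;
}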
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index ea20f627dabe..e2a946c0e667 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -142,6 +142,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+
+ /* check if no alarm is set */
+ if (y_m_d == 0 && h_m_s_ms == 0) {
+ pr_debug("No alarm is set\n");
+ rc = -ENOENT;
+ goto exit;
+ } else {
+ pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
+ }
+
opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
exit:
@@ -157,7 +167,14 @@ static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
u32 y_m_d = 0;
int token, rc;
- tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
+ /* if alarm is enabled */
+ if (alarm->enabled) {
+ tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
+ pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
+
+ } else {
+ pr_debug("Alarm getting disabled\n");
+ }
token = opal_async_get_token_interruptible();
if (token < 0) {
@@ -190,6 +207,18 @@ exit:
return rc;
}
+int opal_tpo_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct rtc_wkalrm alarm = { .enabled = 0 };
+
+ /*
+ * TPO is automatically enabled when opal_set_tpo_time() is called with
+ * non-zero rtc-time. We only handle the disable case, which needs to
+ * be explicitly communicated to opal.
+ */
+ return enabled ? 0 : opal_set_tpo_time(dev, &alarm);
+}
+
static struct rtc_class_ops opal_rtc_ops = {
.read_time = opal_get_rtc_time,
.set_time = opal_set_rtc_time,
@@ -205,6 +234,7 @@ static int opal_rtc_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
opal_rtc_ops.read_alarm = opal_get_tpo_time;
opal_rtc_ops.set_alarm = opal_set_tpo_time;
+ opal_rtc_ops.alarm_irq_enable = opal_tpo_alarm_irq_enable;
}
rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 1227ceab61ee..cea6ea4df970 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -606,7 +606,7 @@ static int pcf8563_probe(struct i2c_client *client,
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf8563_irq,
IRQF_SHARED|IRQF_ONESHOT|IRQF_TRIGGER_FALLING,
- pcf8563->rtc->name, client);
+ pcf8563_driver.driver.name, client);
if (err) {
dev_err(&client->dev, "unable to request IRQ %d\n",
client->irq);
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 9ad97ab29866..aae2576741a6 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -68,6 +68,7 @@ struct rv8803_data {
struct mutex flags_lock;
u8 ctrl;
enum rv8803_type type;
+ struct nvmem_config nvmem_cfg;
};
static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
@@ -460,48 +461,32 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
}
}
-static ssize_t rv8803_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int rv8803_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
{
- struct device *dev = kobj_to_dev(kobj);
- struct i2c_client *client = to_i2c_client(dev);
int ret;
- ret = rv8803_write_reg(client, RV8803_RAM, buf[0]);
+ ret = rv8803_write_reg(priv, RV8803_RAM, *(u8 *)val);
if (ret)
return ret;
- return 1;
+ return 0;
}
-static ssize_t rv8803_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int rv8803_nvram_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
{
- struct device *dev = kobj_to_dev(kobj);
- struct i2c_client *client = to_i2c_client(dev);
int ret;
- ret = rv8803_read_reg(client, RV8803_RAM);
+ ret = rv8803_read_reg(priv, RV8803_RAM);
if (ret < 0)
return ret;
- buf[0] = ret;
+ *(u8 *)val = ret;
- return 1;
+ return 0;
}
-static struct bin_attribute rv8803_nvram_attr = {
- .attr = {
- .name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = 1,
- .read = rv8803_nvram_read,
- .write = rv8803_nvram_write,
-};
-
static struct rtc_class_ops rv8803_rtc_ops = {
.read_time = rv8803_get_time,
.set_time = rv8803_set_time,
@@ -577,6 +562,11 @@ static int rv8803_probe(struct i2c_client *client,
if (flags & RV8803_FLAG_AF)
dev_warn(&client->dev, "An alarm may have been missed.\n");
+ rv8803->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rv8803->rtc))
+ return PTR_ERR(rv8803->rtc);
+
if (client->irq > 0) {
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, rv8803_handle_irq,
@@ -592,12 +582,20 @@ static int rv8803_probe(struct i2c_client *client,
}
}
- rv8803->rtc = devm_rtc_device_register(&client->dev, client->name,
- &rv8803_rtc_ops, THIS_MODULE);
- if (IS_ERR(rv8803->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
- return PTR_ERR(rv8803->rtc);
- }
+ rv8803->nvmem_cfg.name = "rv8803_nvram";
+ rv8803->nvmem_cfg.word_size = 1;
+ rv8803->nvmem_cfg.stride = 1;
+ rv8803->nvmem_cfg.size = 1;
+ rv8803->nvmem_cfg.reg_read = rv8803_nvram_read;
+ rv8803->nvmem_cfg.reg_write = rv8803_nvram_write;
+ rv8803->nvmem_cfg.priv = client;
+
+ rv8803->rtc->ops = &rv8803_rtc_ops;
+ rv8803->rtc->nvmem_config = &rv8803->nvmem_cfg;
+ rv8803->rtc->nvram_old_abi = true;
+ err = rtc_register_device(rv8803->rtc);
+ if (err)
+ return err;
err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
if (err)
@@ -609,22 +607,11 @@ static int rv8803_probe(struct i2c_client *client,
return err;
}
- err = device_create_bin_file(&client->dev, &rv8803_nvram_attr);
- if (err)
- return err;
-
rv8803->rtc->max_user_freq = 1;
return 0;
}
-static int rv8803_remove(struct i2c_client *client)
-{
- device_remove_bin_file(&client->dev, &rv8803_nvram_attr);
-
- return 0;
-}
-
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
{ "rx8900", rx_8900 },
@@ -651,7 +638,6 @@ static struct i2c_driver rv8803_driver = {
.of_match_table = of_match_ptr(rv8803_of_match),
},
.probe = rv8803_probe,
- .remove = rv8803_remove,
.id_table = rv8803_id,
};
module_i2c_driver(rv8803_driver);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index d44fb34df8fe..a8992c227f61 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -41,7 +41,7 @@ struct s3c_rtc {
struct clk *rtc_src_clk;
bool clk_disabled;
- struct s3c_rtc_data *data;
+ const struct s3c_rtc_data *data;
int irq_alarm;
int irq_tick;
@@ -49,7 +49,8 @@ struct s3c_rtc {
spinlock_t pie_lock;
spinlock_t alarm_clk_lock;
- int ticnt_save, ticnt_en_save;
+ int ticnt_save;
+ int ticnt_en_save;
bool wake_en;
};
@@ -67,18 +68,32 @@ struct s3c_rtc_data {
void (*disable) (struct s3c_rtc *info);
};
-static void s3c_rtc_enable_clk(struct s3c_rtc *info)
+static int s3c_rtc_enable_clk(struct s3c_rtc *info)
{
unsigned long irq_flags;
+ int ret = 0;
spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+
if (info->clk_disabled) {
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ ret = clk_enable(info->rtc_clk);
+ if (ret)
+ goto out;
+
+ if (info->data->needs_src_clk) {
+ ret = clk_enable(info->rtc_src_clk);
+ if (ret) {
+ clk_disable(info->rtc_clk);
+ goto out;
+ }
+ }
info->clk_disabled = false;
}
+
+out:
spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+
+ return ret;
}
static void s3c_rtc_disable_clk(struct s3c_rtc *info)
@@ -121,10 +136,13 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
unsigned int tmp;
+ int ret;
dev_dbg(info->dev, "%s: aie=%d\n", __func__, enabled);
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
tmp = readb(info->base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
@@ -135,10 +153,13 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
s3c_rtc_disable_clk(info);
- if (enabled)
- s3c_rtc_enable_clk(info);
- else
+ if (enabled) {
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
+ } else {
s3c_rtc_disable_clk(info);
+ }
return 0;
}
@@ -146,10 +167,14 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
/* Set RTC frequency */
static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
{
+ int ret;
+
if (!is_power_of_2(freq))
return -EINVAL;
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
spin_lock_irq(&info->pie_lock);
if (info->data->set_freq)
@@ -166,10 +191,13 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
unsigned int have_retried = 0;
+ int ret;
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
- retry_get_time:
+retry_get_time:
rtc_tm->tm_min = readb(info->base + S3C2410_RTCMIN);
rtc_tm->tm_hour = readb(info->base + S3C2410_RTCHOUR);
rtc_tm->tm_mday = readb(info->base + S3C2410_RTCDATE);
@@ -199,8 +227,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year += 100;
dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
- 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
- rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+ 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
rtc_tm->tm_mon -= 1;
@@ -211,10 +239,11 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
int year = tm->tm_year - 100;
+ int ret;
dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n",
- 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
/* we get around y2k by simply not supporting it */
@@ -223,7 +252,9 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
writeb(bin2bcd(tm->tm_sec), info->base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), info->base + S3C2410_RTCMIN);
@@ -242,8 +273,11 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct s3c_rtc *info = dev_get_drvdata(dev);
struct rtc_time *alm_tm = &alrm->time;
unsigned int alm_en;
+ int ret;
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
alm_tm->tm_sec = readb(info->base + S3C2410_ALMSEC);
alm_tm->tm_min = readb(info->base + S3C2410_ALMMIN);
@@ -259,9 +293,9 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
- alm_en,
- 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
- alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+ alm_en,
+ 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
/* decode the alarm enable field */
if (alm_en & S3C2410_RTCALM_SECEN)
@@ -292,14 +326,17 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct s3c_rtc *info = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
unsigned int alrm_en;
+ int ret;
int year = tm->tm_year - 100;
dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
- alrm->enabled,
- 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ alrm->enabled,
+ 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
alrm_en = readb(info->base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, info->base + S3C2410_RTCALM);
@@ -348,8 +385,11 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
+ int ret;
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
if (info->data->enable_tick)
info->data->enable_tick(info, seq);
@@ -378,8 +418,7 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
dev_info(info->dev, "rtc disabled, re-enabling\n");
tmp = readw(info->base + S3C2410_RTCCON);
- writew(tmp | S3C2410_RTCCON_RTCEN,
- info->base + S3C2410_RTCCON);
+ writew(tmp | S3C2410_RTCCON_RTCEN, info->base + S3C2410_RTCCON);
}
if (con & S3C2410_RTCCON_CNTSEL) {
@@ -387,7 +426,7 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
tmp = readw(info->base + S3C2410_RTCCON);
writew(tmp & ~S3C2410_RTCCON_CNTSEL,
- info->base + S3C2410_RTCCON);
+ info->base + S3C2410_RTCCON);
}
if (con & S3C2410_RTCCON_CLKRST) {
@@ -395,7 +434,7 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
tmp = readw(info->base + S3C2410_RTCCON);
writew(tmp & ~S3C2410_RTCCON_CLKRST,
- info->base + S3C2410_RTCCON);
+ info->base + S3C2410_RTCCON);
}
}
@@ -437,12 +476,12 @@ static int s3c_rtc_remove(struct platform_device *pdev)
static const struct of_device_id s3c_rtc_dt_match[];
-static struct s3c_rtc_data *s3c_rtc_get_data(struct platform_device *pdev)
+static const struct s3c_rtc_data *s3c_rtc_get_data(struct platform_device *pdev)
{
const struct of_device_id *match;
match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
- return (struct s3c_rtc_data *)match->data;
+ return match->data;
}
static int s3c_rtc_probe(struct platform_device *pdev)
@@ -481,7 +520,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
}
dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
- info->irq_tick, info->irq_alarm);
+ info->irq_tick, info->irq_alarm);
/* get the memory region */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -498,7 +537,9 @@ static int s3c_rtc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "probe deferred due to missing rtc clk\n");
return ret;
}
- clk_prepare_enable(info->rtc_clk);
+ ret = clk_prepare_enable(info->rtc_clk);
+ if (ret)
+ return ret;
if (info->data->needs_src_clk) {
info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
@@ -510,10 +551,11 @@ static int s3c_rtc_probe(struct platform_device *pdev)
else
dev_dbg(&pdev->dev,
"probe deferred due to missing rtc src clk\n");
- clk_disable_unprepare(info->rtc_clk);
- return ret;
+ goto err_src_clk;
}
- clk_prepare_enable(info->rtc_src_clk);
+ ret = clk_prepare_enable(info->rtc_src_clk);
+ if (ret)
+ goto err_src_clk;
}
/* check to see if everything is setup correctly */
@@ -521,7 +563,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
info->data->enable(info);
dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
- readw(info->base + S3C2410_RTCCON));
+ readw(info->base + S3C2410_RTCCON));
device_init_wakeup(&pdev->dev, 1);
@@ -541,7 +583,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
/* register RTC and exit */
info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
- THIS_MODULE);
+ THIS_MODULE);
if (IS_ERR(info->rtc)) {
dev_err(&pdev->dev, "cannot attach rtc\n");
ret = PTR_ERR(info->rtc);
@@ -549,14 +591,14 @@ static int s3c_rtc_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, info->irq_alarm, s3c_rtc_alarmirq,
- 0, "s3c2410-rtc alarm", info);
+ 0, "s3c2410-rtc alarm", info);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq_alarm, ret);
goto err_nortc;
}
ret = devm_request_irq(&pdev->dev, info->irq_tick, s3c_rtc_tickirq,
- 0, "s3c2410-rtc tick", info);
+ 0, "s3c2410-rtc tick", info);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq_tick, ret);
goto err_nortc;
@@ -569,12 +611,13 @@ static int s3c_rtc_probe(struct platform_device *pdev)
return 0;
- err_nortc:
+err_nortc:
if (info->data->disable)
info->data->disable(info);
if (info->data->needs_src_clk)
clk_disable_unprepare(info->rtc_src_clk);
+err_src_clk:
clk_disable_unprepare(info->rtc_clk);
return ret;
@@ -585,8 +628,11 @@ static int s3c_rtc_probe(struct platform_device *pdev)
static int s3c_rtc_suspend(struct device *dev)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
+ int ret;
- s3c_rtc_enable_clk(info);
+ ret = s3c_rtc_enable_clk(info);
+ if (ret)
+ return ret;
/* save TICNT for anyone using periodic interrupts */
if (info->data->save_tick_cnt)
@@ -747,8 +793,7 @@ static void s3c6410_rtc_restore_tick_cnt(struct s3c_rtc *info)
writel(info->ticnt_save, info->base + S3C2410_TICNT);
if (info->ticnt_en_save) {
con = readw(info->base + S3C2410_RTCCON);
- writew(con | info->ticnt_en_save,
- info->base + S3C2410_RTCCON);
+ writew(con | info->ticnt_en_save, info->base + S3C2410_RTCCON);
}
}
@@ -802,19 +847,19 @@ static struct s3c_rtc_data const s3c6410_rtc_data = {
static const struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
- .data = (void *)&s3c2410_rtc_data,
+ .data = &s3c2410_rtc_data,
}, {
.compatible = "samsung,s3c2416-rtc",
- .data = (void *)&s3c2416_rtc_data,
+ .data = &s3c2416_rtc_data,
}, {
.compatible = "samsung,s3c2443-rtc",
- .data = (void *)&s3c2443_rtc_data,
+ .data = &s3c2443_rtc_data,
}, {
.compatible = "samsung,s3c6410-rtc",
- .data = (void *)&s3c6410_rtc_data,
+ .data = &s3c6410_rtc_data,
}, {
.compatible = "samsung,exynos3250-rtc",
- .data = (void *)&s3c6410_rtc_data,
+ .data = &s3c6410_rtc_data,
},
{ /* sentinel */ },
};
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 74c0a336ceea..82b0af159a28 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -99,7 +99,7 @@ static int st_rtc_read_time(struct device *dev, struct rtc_time *tm)
lpt = ((unsigned long long)lpt_msb << 32) | lpt_lsb;
do_div(lpt, rtc->clkrate);
- rtc_time_to_tm(lpt, tm);
+ rtc_time64_to_tm(lpt, tm);
return 0;
}
@@ -107,13 +107,10 @@ static int st_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int st_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct st_rtc *rtc = dev_get_drvdata(dev);
- unsigned long long lpt;
- unsigned long secs, flags;
- int ret;
+ unsigned long long lpt, secs;
+ unsigned long flags;
- ret = rtc_tm_to_time(tm, &secs);
- if (ret)
- return ret;
+ secs = rtc_tm_to_time64(tm);
lpt = (unsigned long long)secs * rtc->clkrate;
@@ -161,13 +158,13 @@ static int st_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct st_rtc *rtc = dev_get_drvdata(dev);
struct rtc_time now;
- unsigned long now_secs;
- unsigned long alarm_secs;
+ unsigned long long now_secs;
+ unsigned long long alarm_secs;
unsigned long long lpa;
st_rtc_read_time(dev, &now);
- rtc_tm_to_time(&now, &now_secs);
- rtc_tm_to_time(&t->time, &alarm_secs);
+ now_secs = rtc_tm_to_time64(&now);
+ alarm_secs = rtc_tm_to_time64(&t->time);
/* Invalid alarm time */
if (now_secs > alarm_secs)
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index bd57eb1029e1..3a5c3d7d0c77 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -94,11 +94,17 @@
/* STM32_PWR_CR bit field */
#define PWR_CR_DBP BIT(8)
+struct stm32_rtc_data {
+ bool has_pclk;
+};
+
struct stm32_rtc {
struct rtc_device *rtc_dev;
void __iomem *base;
struct regmap *dbp;
- struct clk *ck_rtc;
+ struct stm32_rtc_data *data;
+ struct clk *pclk;
+ struct clk *rtc_ck;
int irq_alarm;
};
@@ -122,9 +128,9 @@ static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
writel_relaxed(isr, rtc->base + STM32_RTC_ISR);
/*
- * It takes around 2 ck_rtc clock cycles to enter in
+ * It takes around 2 rtc_ck clock cycles to enter in
* initialization phase mode (and have INITF flag set). As
- * slowest ck_rtc frequency may be 32kHz and highest should be
+ * slowest rtc_ck frequency may be 32kHz and highest should be
* 1MHz, we poll every 10 us with a timeout of 100ms.
*/
return readl_relaxed_poll_timeout_atomic(
@@ -153,7 +159,7 @@ static int stm32_rtc_wait_sync(struct stm32_rtc *rtc)
/*
* Wait for RSF to be set to ensure the calendar registers are
- * synchronised, it takes around 2 ck_rtc clock cycles
+ * synchronised, it takes around 2 rtc_ck clock cycles
*/
return readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR,
isr,
@@ -456,7 +462,7 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/*
* Poll Alarm write flag to be sure that Alarm update is allowed: it
- * takes around 2 ck_rtc clock cycles
+ * takes around 2 rtc_ck clock cycles
*/
ret = readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR,
isr,
@@ -490,8 +496,17 @@ static const struct rtc_class_ops stm32_rtc_ops = {
.alarm_irq_enable = stm32_rtc_alarm_irq_enable,
};
+static const struct stm32_rtc_data stm32_rtc_data = {
+ .has_pclk = false,
+};
+
+static const struct stm32_rtc_data stm32h7_rtc_data = {
+ .has_pclk = true,
+};
+
static const struct of_device_id stm32_rtc_of_match[] = {
- { .compatible = "st,stm32-rtc" },
+ { .compatible = "st,stm32-rtc", .data = &stm32_rtc_data },
+ { .compatible = "st,stm32h7-rtc", .data = &stm32h7_rtc_data },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
@@ -503,7 +518,7 @@ static int stm32_rtc_init(struct platform_device *pdev,
unsigned int rate;
int ret = 0;
- rate = clk_get_rate(rtc->ck_rtc);
+ rate = clk_get_rate(rtc->rtc_ck);
/* Find prediv_a and prediv_s to obtain the 1Hz calendar clock */
pred_a_max = STM32_RTC_PRER_PRED_A >> STM32_RTC_PRER_PRED_A_SHIFT;
@@ -524,7 +539,7 @@ static int stm32_rtc_init(struct platform_device *pdev,
pred_a = pred_a_max;
pred_s = (rate / (pred_a + 1)) - 1;
- dev_warn(&pdev->dev, "ck_rtc is %s\n",
+ dev_warn(&pdev->dev, "rtc_ck is %s\n",
(rate < ((pred_a + 1) * (pred_s + 1))) ?
"fast" : "slow");
}
@@ -561,6 +576,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
struct resource *res;
+ const struct of_device_id *match;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -579,15 +595,34 @@ static int stm32_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->dbp);
}
- rtc->ck_rtc = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(rtc->ck_rtc)) {
- dev_err(&pdev->dev, "no ck_rtc clock");
- return PTR_ERR(rtc->ck_rtc);
+ match = of_match_device(stm32_rtc_of_match, &pdev->dev);
+ rtc->data = (struct stm32_rtc_data *)match->data;
+
+ if (!rtc->data->has_pclk) {
+ rtc->pclk = NULL;
+ rtc->rtc_ck = devm_clk_get(&pdev->dev, NULL);
+ } else {
+ rtc->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(rtc->pclk)) {
+ dev_err(&pdev->dev, "no pclk clock");
+ return PTR_ERR(rtc->pclk);
+ }
+ rtc->rtc_ck = devm_clk_get(&pdev->dev, "rtc_ck");
+ }
+ if (IS_ERR(rtc->rtc_ck)) {
+ dev_err(&pdev->dev, "no rtc_ck clock");
+ return PTR_ERR(rtc->rtc_ck);
+ }
+
+ if (rtc->data->has_pclk) {
+ ret = clk_prepare_enable(rtc->pclk);
+ if (ret)
+ return ret;
}
- ret = clk_prepare_enable(rtc->ck_rtc);
+ ret = clk_prepare_enable(rtc->rtc_ck);
if (ret)
- return ret;
+ goto err;
regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
@@ -595,7 +630,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
* After a system reset, RTC_ISR.INITS flag can be read to check if
* the calendar has been initialized or not. INITS flag is reset by a
* power-on reset (no vbat, no power-supply). It is not reset if
- * ck_rtc parent clock has changed (so RTC prescalers need to be
+ * rtc_ck parent clock has changed (so RTC prescalers need to be
* changed). That's why we cannot rely on this flag to know if RTC
* init has to be done.
*/
@@ -646,7 +681,9 @@ static int stm32_rtc_probe(struct platform_device *pdev)
return 0;
err:
- clk_disable_unprepare(rtc->ck_rtc);
+ if (rtc->data->has_pclk)
+ clk_disable_unprepare(rtc->pclk);
+ clk_disable_unprepare(rtc->rtc_ck);
regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0);
@@ -667,7 +704,9 @@ static int stm32_rtc_remove(struct platform_device *pdev)
writel_relaxed(cr, rtc->base + STM32_RTC_CR);
stm32_rtc_wpr_lock(rtc);
- clk_disable_unprepare(rtc->ck_rtc);
+ clk_disable_unprepare(rtc->rtc_ck);
+ if (rtc->data->has_pclk)
+ clk_disable_unprepare(rtc->pclk);
/* Enable backup domain write protection */
regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0);
@@ -682,6 +721,9 @@ static int stm32_rtc_suspend(struct device *dev)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
+ if (rtc->data->has_pclk)
+ clk_disable_unprepare(rtc->pclk);
+
if (device_may_wakeup(dev))
return enable_irq_wake(rtc->irq_alarm);
@@ -693,6 +735,12 @@ static int stm32_rtc_resume(struct device *dev)
struct stm32_rtc *rtc = dev_get_drvdata(dev);
int ret = 0;
+ if (rtc->data->has_pclk) {
+ ret = clk_prepare_enable(rtc->pclk);
+ if (ret)
+ return ret;
+ }
+
ret = stm32_rtc_wait_sync(rtc);
if (ret < 0)
return ret;
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 1218d5d4224d..e364550eb9a7 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -27,7 +27,8 @@
static ssize_t
name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n", to_rtc_device(dev)->name);
+ return sprintf(buf, "%s %s\n", dev_driver_string(dev->parent),
+ dev_name(dev->parent));
}
static DEVICE_ATTR_RO(name);
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 65f5a794f26d..98749fa817da 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -98,7 +98,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
}
if (!session->response)
session->response = (char *)__get_free_pages(GFP_KERNEL
- | __GFP_REPEAT | GFP_DMA,
+ | __GFP_RETRY_MAYFAIL | GFP_DMA,
get_order(session->bufsize));
if (!session->response) {
mutex_unlock(&session->mutex);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 1563b1458e44..2ade6131a89f 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1115,7 +1115,7 @@ static const struct net_device_ops ctcm_mpc_netdev_ops = {
.ndo_start_xmit = ctcmpc_tx,
};
-void static ctcm_dev_setup(struct net_device *dev)
+static void ctcm_dev_setup(struct net_device *dev)
{
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3062cde33a3d..8975cd321390 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2408,7 +2408,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
int cast_type = RTN_UNSPEC;
struct neighbour *n = NULL;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 077f62e208aa..6a4367cc9caa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
if (is_write) {
req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
- rc = copy_from_user(kbuf, ubuf, ulen);
- if (unlikely(rc))
+ if (copy_from_user(kbuf, ubuf, ulen)) {
+ rc = -EFAULT;
goto out;
+ }
}
}
@@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
goto out;
}
- if (ulen && !is_write)
- rc = copy_to_user(ubuf, kbuf, ulen);
+ if (ulen && !is_write) {
+ if (copy_to_user(ubuf, kbuf, ulen))
+ rc = -EFAULT;
+ }
out:
kfree(buf);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
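/*
 * Background for the conversion above (not part of the patch):
 * copy_from_user()/copy_to_user() return the number of bytes left
 * uncopied, not an errno, so the old code could propagate a positive
 * byte count to callers expecting 0 or a negative error.  The pattern
 * adopted here translates any nonzero return into -EFAULT:
 *
 *	if (copy_from_user(kbuf, ubuf, ulen)) {
 *		rc = -EFAULT;
 *		goto out;
 *	}
 */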
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 551d103c27f1..2bfea7082e3a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
{
- const u8 trans_tx_err_code_prio[] = {
+ static const u8 trans_tx_err_code_prio[] = {
TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
TRANS_TX_ERR_PHY_NOT_ENABLE,
TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
@@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
{
- const u8 trans_rx_err_code_prio[] = {
+ static const u8 trans_rx_err_code_prio[] = {
TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
@@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
{
- const u8 dma_tx_err_code_prio[] = {
+ static const u8 dma_tx_err_code_prio[] = {
DMA_TX_UNEXP_XFER_ERR,
DMA_TX_UNEXP_RETRANS_ERR,
DMA_TX_XFER_LEN_OVERFLOW,
@@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
{
- const u8 sipc_rx_err_code_prio[] = {
+ static const u8 sipc_rx_err_code_prio[] = {
SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
@@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
{
- const u8 dma_rx_err_code_prio[] = {
+ static const u8 dma_rx_err_code_prio[] = {
DMA_RX_UNKNOWN_FRM_ERR,
DMA_RX_DATA_LEN_OVERFLOW,
DMA_RX_DATA_LEN_UNDERFLOW,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 659ab483d716..1f75d0380516 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -155,6 +155,9 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
qrc = h_free_crq(vscsi->dds.unit_id);
switch (qrc) {
case H_SUCCESS:
+ spin_lock_bh(&vscsi->intr_lock);
+ vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
+ spin_unlock_bh(&vscsi->intr_lock);
break;
case H_HARDWARE:
@@ -422,6 +425,9 @@ static void ibmvscsis_disconnect(struct work_struct *work)
new_state = vscsi->new_state;
vscsi->new_state = 0;
+ vscsi->flags |= DISCONNECT_SCHEDULED;
+ vscsi->flags &= ~SCHEDULE_DISCONNECT;
+
pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
vscsi->state);
@@ -802,6 +808,13 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
long rc = ADAPT_SUCCESS;
uint format;
+ rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
+ 0, 0, 0, 0);
+ if (rc == H_SUCCESS)
+ vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
+ else if (rc != H_NOT_FOUND)
+ pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+
vscsi->flags &= PRESERVE_FLAG_FIELDS;
vscsi->rsp_q_timer.timer_pops = 0;
vscsi->debit = 0;
@@ -951,6 +964,63 @@ static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
}
/**
+ * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
+ * @vscsi: Pointer to our adapter structure
+ * @idle: Indicates whether we were called from adapter_idle. This
+ * is important to know if we need to do a disconnect, since if
+ * we're called from adapter_idle, we're still processing the
+ * current disconnect, so we can't just call post_disconnect.
+ *
+ * This function is called when the adapter is idle after phyp has sent
+ * us a Prepare for Suspend Transport Event.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process or interrupt environment called with interrupt lock held
+ */
+static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
+{
+ long rc = 0;
+ struct viosrp_crq *crq;
+
+ /* See if there is a Resume event in the queue */
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+
+ pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+ vscsi->flags, vscsi->state, (int)crq->valid);
+
+ if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
+ rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
+ 0, 0);
+ if (rc) {
+ pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+ rc = 0;
+ }
+ } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
+ (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
+ ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
+ (crq->format != RESUME_FROM_SUSP)))) {
+ if (idle) {
+ vscsi->state = ERR_DISCONNECT_RECONNECT;
+ ibmvscsis_reset_queue(vscsi);
+ rc = -1;
+ } else if (vscsi->state == CONNECTED) {
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
+
+ if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
+ (crq->format != RESUME_FROM_SUSP)))
+ pr_err("Invalid element in CRQ after Prepare for Suspend");
+ }
+
+ vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
+
+ return rc;
+}
+
+/**
* ibmvscsis_trans_event() - Handle a Transport Event
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ entry containing the Transport Event
@@ -974,18 +1044,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
case PARTNER_FAILED:
case PARTNER_DEREGISTER:
ibmvscsis_delete_client_info(vscsi, true);
- break;
-
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
- (uint)crq->format);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
- RESPONSE_Q_DOWN);
- break;
- }
-
- if (rc == ADAPT_SUCCESS) {
+ if (crq->format == MIGRATED)
+ vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
switch (vscsi->state) {
case NO_QUEUE:
case ERR_DISCONNECTED:
@@ -1034,6 +1094,60 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
break;
}
+ break;
+
+ case PREPARE_FOR_SUSPEND:
+ pr_debug("Prep for Suspend, crq status = 0x%x\n",
+ (int)crq->status);
+ switch (vscsi->state) {
+ case ERR_DISCONNECTED:
+ case WAIT_CONNECTION:
+ case CONNECTED:
+ ibmvscsis_ready_for_suspend(vscsi, false);
+ break;
+ case SRP_PROCESSING:
+ vscsi->resume_state = vscsi->state;
+ vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
+ if (crq->status == CRQ_ENTRY_OVERWRITTEN)
+ vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
+ break;
+ case NO_QUEUE:
+ case UNDEFINED:
+ case UNCONFIGURING:
+ case WAIT_ENABLED:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case WAIT_IDLE:
+ pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+ vscsi->state);
+ break;
+ }
+ break;
+
+ case RESUME_FROM_SUSP:
+ pr_debug("Resume from Suspend, crq status = 0x%x\n",
+ (int)crq->status);
+ if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
+ vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
+ } else {
+ if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
+ (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ 0);
+ vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
+ }
+ }
+ break;
+
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
+ RESPONSE_Q_DOWN);
+ break;
}
rc = vscsi->flags & SCHEDULE_DISCONNECT;
@@ -1201,6 +1315,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
{
int free_qs = false;
+ long rc = 0;
pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
vscsi->state);
@@ -1240,7 +1355,14 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->rsp_q_timer.timer_pops = 0;
vscsi->debit = 0;
vscsi->credit = 0;
- if (vscsi->flags & TRANS_EVENT) {
+ if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
+ vscsi->state = vscsi->resume_state;
+ vscsi->resume_state = 0;
+ rc = ibmvscsis_ready_for_suspend(vscsi, true);
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ if (rc)
+ break;
+ } else if (vscsi->flags & TRANS_EVENT) {
vscsi->state = WAIT_CONNECTION;
vscsi->flags &= PRESERVE_FLAG_FIELDS;
} else {
@@ -3792,8 +3914,16 @@ static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
{
struct ibmvscsis_tport *tport =
container_of(wwn, struct ibmvscsis_tport, tport_wwn);
+ u16 tpgt;
int rc;
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ rc = kstrtou16(name + 5, 0, &tpgt);
+ if (rc)
+ return ERR_PTR(rc);
+ tport->tport_tpgt = tpgt;
+
tport->releasing = false;
rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index b4391a8de456..cc96c2731134 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -262,6 +262,14 @@ struct scsi_info {
#define DISCONNECT_SCHEDULED 0x00800
/* remove function is sleeping */
#define CFG_SLEEPING 0x01000
+ /* Register for Prepare for Suspend Transport Events */
+#define PREP_FOR_SUSPEND_ENABLED 0x02000
+ /* Prepare for Suspend event sent */
+#define PREP_FOR_SUSPEND_PENDING 0x04000
+ /* Resume from Suspend event sent */
+#define PREP_FOR_SUSPEND_ABORTED 0x08000
+ /* Prepare for Suspend event overwrote another CRQ entry */
+#define PREP_FOR_SUSPEND_OVERWRITE 0x10000
u32 flags;
/* adapter lock */
spinlock_t intr_lock;
@@ -272,6 +280,7 @@ struct scsi_info {
/* used in crq, to tag what iu the response is for */
u64 empty_iu_tag;
uint new_state;
+ uint resume_state;
/* control block for the response queue timer */
struct timer_cb rsp_q_timer;
/* keep last client to enable proper accounting */
@@ -324,8 +333,13 @@ struct scsi_info {
#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
((VSCSI)->flags & BLOCK))
+#define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \
+ PREP_FOR_SUSPEND_PENDING | \
+ PREP_FOR_SUSPEND_ABORTED | \
+ PREP_FOR_SUSPEND_OVERWRITE)
+
/* flag bit that are not reset during disconnect */
-#define PRESERVE_FLAG_FIELDS 0
+#define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS)
#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
@@ -333,8 +347,15 @@ struct scsi_info {
#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA)
#ifndef H_GET_PARTNER_INFO
-#define H_GET_PARTNER_INFO 0x0000000000000008LL
+#define H_GET_PARTNER_INFO 0x0000000000000008LL
+#endif
+#ifndef H_ENABLE_PREPARE_FOR_SUSPEND
+#define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL
#endif
+#ifndef H_READY_FOR_SUSPEND
+#define H_READY_FOR_SUSPEND 0x000000000000001ELL
+#endif
+
#define h_copy_rdma(l, sa, sb, da, db) \
plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
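
With PRESERVE_FLAG_FIELDS now covering the four PREP_FOR_SUSPEND_* bits, a
disconnect no longer wipes the suspend bookkeeping. A minimal sketch of the
masking pattern adapter_idle() relies on (the helper name is hypothetical):

	static void reset_flags_on_disconnect_sketch(struct scsi_info *vscsi)
	{
		/* everything except the PREP_FOR_SUSPEND_* bits is cleared */
		vscsi->flags &= PRESERVE_FLAG_FIELDS;
	}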
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h
index 4696f331453e..9fec55b36322 100644
--- a/drivers/scsi/ibmvscsi_tgt/libsrp.h
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -30,10 +30,13 @@ enum srp_trans_event {
UNUSED_FORMAT = 0,
PARTNER_FAILED = 1,
PARTNER_DEREGISTER = 2,
- MIGRATED = 6
+ MIGRATED = 6,
+ PREPARE_FOR_SUSPEND = 9,
+ RESUME_FROM_SUSP = 0xA
};
enum srp_status {
+ CRQ_ENTRY_OVERWRITTEN = 0x20,
HEADER_DESCRIPTOR = 0xF1,
PING = 0xF5,
PING_RESPONSE = 0xF6
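
The two new event codes and the CRQ_ENTRY_OVERWRITTEN status let the target
distinguish a clean resume from one whose CRQ entry clobbered another
message. A hedged sketch of the dispatch shape (only the enum and status
names come from this patch; the struct and handler are illustrative):

	struct viosrp_crq_sketch { u8 format; u8 status; };

	static int handle_trans_event_sketch(struct viosrp_crq_sketch *crq)
	{
		switch (crq->format) {
		case PREPARE_FOR_SUSPEND:
			return 1;	/* quiesce, then H_READY_FOR_SUSPEND */
		case RESUME_FROM_SUSP:
			/* an overwritten CRQ entry forces a reconnect */
			return crq->status == CRQ_ENTRY_OVERWRITTEN ? -1 : 0;
		default:
			return 0;
		}
	}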
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 47f66e949745..ed197bc8e801 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
* @task_context:
*
*/
-static void scu_ssp_reqeust_construct_task_context(
+static void scu_ssp_request_construct_task_context(
struct isci_request *ireq,
struct scu_task_context *task_context)
{
@@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
u8 prot_type = scsi_get_prot_type(scmd);
u8 prot_op = scsi_get_prot_op(scmd);
- scu_ssp_reqeust_construct_task_context(ireq, task_context);
+ scu_ssp_request_construct_task_context(ireq, task_context);
task_context->ssp_command_iu_length =
sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
{
struct scu_task_context *task_context = ireq->tc;
- scu_ssp_reqeust_construct_task_context(ireq, task_context);
+ scu_ssp_request_construct_task_context(ireq, task_context);
task_context->control_frame = 1;
task_context->priority = SCU_TASK_PRIORITY_HIGH;
@@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
 * the command buffer is complete. Revisit task context construction to
* determine what is common for SSP/SMP/STP task context structures.
*/
-static void scu_sata_reqeust_construct_task_context(
+static void scu_sata_request_construct_task_context(
struct isci_request *ireq,
struct scu_task_context *task_context)
{
@@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq
{
struct scu_task_context *task_context = ireq->tc;
- scu_sata_reqeust_construct_task_context(ireq, task_context);
+ scu_sata_request_construct_task_context(ireq, task_context);
task_context->control_frame = 0;
task_context->priority = SCU_TASK_PRIORITY_NORMAL;
@@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
struct scu_task_context *task_context = ireq->tc;
/* Build the STP task context structure */
- scu_sata_reqeust_construct_task_context(ireq, task_context);
+ scu_sata_request_construct_task_context(ireq, task_context);
/* Copy over the SGL elements */
sci_request_build_sgl(ireq);
@@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
* @data_buffer: The buffer of data to be copied.
* @length: The length of the data transfer.
*
- * Copy the data from the buffer for the length specified to the IO reqeust SGL
+ * Copy the data from the buffer for the length specified to the IO request SGL
* specified data region. enum sci_status
*/
static enum sci_status
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index fd501f8dbb11..8660f923ace0 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
event = DISC_EV_FAILED;
}
if (error)
- fc_disc_error(disc, fp);
+ fc_disc_error(disc, ERR_PTR(error));
else if (event != DISC_EV_NONE)
fc_disc_done(disc, event);
fc_frame_free(fp);
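
The fc_disc fix encodes the int error into the pointer argument that
fc_disc_error() expects. A hedged reminder of the ERR_PTR convention
involved (the helper is illustrative):

	#include <linux/err.h>

	/* an errno packed into a pointer round-trips via ERR_PTR()/PTR_ERR() */
	static long unpack_sketch(int error)
	{
		void *p = ERR_PTR(error);

		return IS_ERR(p) ? PTR_ERR(p) : 0;
	}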
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b58bba4604e8..7786c97e033f 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
if (rdata->spp_type != FC_TYPE_FCP) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "Not offlading since since spp type isn't FCP\n");
+ "Not offloading since spp type isn't FCP\n");
break;
}
if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 32632c9b2276..91d2f51c351b 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -23,11 +23,17 @@
#include <linux/qed/qed_iscsi_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedi_version.h"
+#include "qedi_nvm_iscsi_cfg.h"
#define QEDI_MODULE_NAME "qedi"
struct qedi_endpoint;
+#ifndef GET_FIELD2
+#define GET_FIELD2(value, name) \
+ (((value) & (name ## _MASK)) >> (name ## _OFFSET))
+#endif
+
/*
* PCI function probe defines
*/
@@ -66,6 +72,11 @@ struct qedi_endpoint;
#define QEDI_HW_DMA_BOUNDARY 0xfff
#define QEDI_PATH_HANDLE 0xFE0000000UL
+enum qedi_nvm_tgts {
+ QEDI_NVM_TGT_PRI,
+ QEDI_NVM_TGT_SEC,
+};
+
struct qedi_uio_ctrl {
/* meta data */
u32 uio_hsi_version;
@@ -283,6 +294,8 @@ struct qedi_ctx {
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
+ struct nvm_iscsi_cfg *iscsi_cfg;
+ dma_addr_t nvm_buf_dma;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
u16 bdq_prod_idx;
@@ -337,6 +350,10 @@ struct qedi_ctx {
bool use_fast_sge;
atomic_t num_offloads;
+#define SYSFS_FLAG_FW_SEL_BOOT 2
+#define IPV6_LEN 41
+#define IPV4_LEN 17
+ struct iscsi_boot_kset *boot_kset;
};
struct qedi_work {
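
GET_FIELD2 assumes the name##_MASK / name##_OFFSET macro convention used
throughout qedi_nvm_iscsi_cfg.h. A minimal sketch of how the boot-info code
extracts the VLAN id with it (the wrapper function is hypothetical):

	static u16 nvm_vlan_sketch(const struct nvm_iscsi_initiator *ini)
	{
		/* expands to (generic_cont0 & VLAN_MASK) >> VLAN_OFFSET */
		return GET_FIELD2(ini->generic_cont0,
				  NVM_ISCSI_CFG_INITIATOR_VLAN);
	}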
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 19254bd739d9..93d54acd4a22 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work)
list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
if (!list_work) {
- QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+ QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
goto abort_ret;
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5f5a4ef2e529..2c3783684815 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
+#include <linux/iscsi_boot_sysfs.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -1143,6 +1144,30 @@ exit_setup_int:
return rc;
}
+static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
+{
+ if (qedi->iscsi_cfg)
+ dma_free_coherent(&qedi->pdev->dev,
+ sizeof(struct nvm_iscsi_cfg),
+ qedi->iscsi_cfg, qedi->nvm_buf_dma);
+}
+
+static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
+{
+ qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
+ sizeof(struct nvm_iscsi_cfg),
+ &qedi->nvm_buf_dma, GFP_KERNEL);
+ if (!qedi->iscsi_cfg) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
+ return -ENOMEM;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+ qedi->nvm_buf_dma);
+
+ return 0;
+}
+
static void qedi_free_bdq(struct qedi_ctx *qedi)
{
int i;
@@ -1183,6 +1208,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi)
kfree(gl[i]);
}
qedi_free_bdq(qedi);
+ qedi_free_nvm_iscsi_cfg(qedi);
}
static int qedi_alloc_bdq(struct qedi_ctx *qedi)
@@ -1309,6 +1335,11 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
if (rc)
goto mem_alloc_failure;
+ /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
+ rc = qedi_alloc_nvm_iscsi_cfg(qedi);
+ if (rc)
+ goto mem_alloc_failure;
+
/* Allocate a CQ and an associated PBL for each MSI-X
* vector.
*/
@@ -1671,6 +1702,387 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
qedi_ops->ll2->start(qedi->cdev, &params);
}
+/**
+ * qedi_get_nvram_block - Scan the iSCSI NVRAM blocks (accounting for gaps)
+ * for the one whose mapped PF id matches this device's absolute PF id.
+ */
+static struct nvm_iscsi_block *
+qedi_get_nvram_block(struct qedi_ctx *qedi)
+{
+ int i;
+ u8 pf;
+ u32 flags;
+ struct nvm_iscsi_block *block;
+
+ pf = qedi->dev_info.common.abs_pf_id;
+ block = &qedi->iscsi_cfg->block[0];
+ for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
+ flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
+ NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
+ if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
+ NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
+ (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
+ >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
+ return block;
+ }
+ return NULL;
+}
+
+static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+ struct nvm_iscsi_initiator *initiator;
+ char *str = buf;
+ int rc = 1;
+ u32 ipv6_en, dhcp_en, ip_len;
+ struct nvm_iscsi_block *block;
+ char *fmt, *ip, *sub, *gw;
+
+ block = qedi_get_nvram_block(qedi);
+ if (!block)
+ return 0;
+
+ initiator = &block->initiator;
+ ipv6_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
+ dhcp_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
+ /* Static IP assignments. */
+ fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
+ ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
+ ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
+ sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
+ initiator->ipv4.subnet_mask.byte;
+ gw = ipv6_en ? initiator->ipv6.gateway.byte :
+ initiator->ipv4.gateway.byte;
+ /* DHCP IP adjustments. */
+ fmt = dhcp_en ? "%s\n" : fmt;
+ if (dhcp_en) {
+ ip = ipv6_en ? "0::0" : "0.0.0.0";
+ sub = ip;
+ gw = ip;
+ ip_len = ipv6_en ? 5 : 8;
+ }
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ rc = snprintf(str, ip_len, fmt, ip);
+ break;
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ rc = snprintf(str, ip_len, fmt, sub);
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ rc = snprintf(str, ip_len, fmt, gw);
+ break;
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = snprintf(str, 3, "%hhd\n",
+ SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = snprintf(str, 3, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ rc = snprintf(str, 12, "%d\n",
+ GET_FIELD2(initiator->generic_cont0,
+ NVM_ISCSI_CFG_INITIATOR_VLAN));
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ if (dhcp_en)
+ rc = snprintf(str, 3, "3\n");
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t qedi_eth_get_attr_visibility(void *data, int type)
+{
+ int rc = 1;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ case ISCSI_BOOT_ETH_GATEWAY:
+ case ISCSI_BOOT_ETH_ORIGIN:
+ case ISCSI_BOOT_ETH_VLAN:
+ rc = 0444;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+ struct nvm_iscsi_initiator *initiator;
+ char *str = buf;
+ int rc;
+ struct nvm_iscsi_block *block;
+
+ block = qedi_get_nvram_block(qedi);
+ if (!block)
+ return 0;
+
+ initiator = &block->initiator;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ initiator->initiator_name.byte);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static umode_t qedi_ini_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = 0444;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t
+qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+ char *buf, enum qedi_nvm_tgts idx)
+{
+ char *str = buf;
+ int rc = 1;
+ u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
+ struct nvm_iscsi_block *block;
+ char *chap_name, *chap_secret;
+ char *mchap_name, *mchap_secret;
+
+ block = qedi_get_nvram_block(qedi);
+ if (!block)
+ goto exit_show_tgt_info;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
+ "Port:%d, tgt_idx:%d\n",
+ GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
+
+ ctrl_flags = block->target[idx].ctrl_flags &
+ NVM_ISCSI_CFG_TARGET_ENABLED;
+
+ if (!ctrl_flags) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
+ "Target disabled\n");
+ goto exit_show_tgt_info;
+ }
+
+ ipv6_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
+ ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
+ chap_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
+ chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
+ chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
+
+ mchap_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
+ mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
+ mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ block->target[idx].target_name.byte);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (ipv6_en)
+ rc = snprintf(str, ip_len, "%pI6\n",
+ block->target[idx].ipv6_addr.byte);
+ else
+ rc = snprintf(str, ip_len, "%pI4\n",
+ block->target[idx].ipv4_addr.byte);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = snprintf(str, 12, "%d\n",
+ GET_FIELD2(block->target[idx].generic_cont0,
+ NVM_ISCSI_CFG_TARGET_TCP_PORT));
+ break;
+ case ISCSI_BOOT_TGT_LUN:
+ rc = snprintf(str, 22, "%.*d\n",
+ block->target[idx].lun.value[1],
+ block->target[idx].lun.value[0]);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+ chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+ chap_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+ mchap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+ mchap_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = snprintf(str, 3, "0\n");
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+exit_show_tgt_info:
+ return rc;
+}
+
+static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+
+ return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
+}
+
+static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+
+ return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
+}
+
+static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_LUN:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = 0444;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static void qedi_boot_release(void *data)
+{
+ struct qedi_ctx *qedi = data;
+
+ scsi_host_put(qedi->shost);
+}
+
+static int qedi_get_boot_info(struct qedi_ctx *qedi)
+{
+ int ret = 1;
+ u16 len;
+
+ len = sizeof(struct nvm_iscsi_cfg);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Get NVM iSCSI CFG image\n");
+ ret = qedi_ops->common->nvm_get_image(qedi->cdev,
+ QED_NVM_IMAGE_ISCSI_CFG,
+ (char *)qedi->iscsi_cfg, len);
+ if (ret)
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not get NVM image. ret = %d\n", ret);
+
+ return ret;
+}
+
+static int qedi_setup_boot_info(struct qedi_ctx *qedi)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ if (qedi_get_boot_info(qedi))
+ return -EPERM;
+
+ qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
+ if (!qedi->boot_kset)
+ goto kset_free;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
+ qedi_show_boot_tgt_pri_info,
+ qedi_tgt_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
+ qedi_show_boot_tgt_sec_info,
+ qedi_tgt_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
+ qedi_show_boot_ini_info,
+ qedi_ini_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
+ qedi_show_boot_eth_info,
+ qedi_eth_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ return 0;
+
+put_host:
+ scsi_host_put(qedi->shost);
+kset_free:
+ iscsi_boot_destroy_kset(qedi->boot_kset);
+ return -ENOMEM;
+}
+
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -1724,6 +2136,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi->ll2_recv_thread = NULL;
}
qedi_ll2_free_skbs(qedi);
+
+ if (qedi->boot_kset)
+ iscsi_boot_destroy_kset(qedi->boot_kset);
}
}
@@ -1967,6 +2382,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
atomic_set(&qedi->num_offloads, 0);
+
+ if (qedi_setup_boot_info(qedi))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "No iSCSI boot target configured\n");
}
return 0;
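
All four show/visibility pairs registered above follow the same
iscsi_boot_sysfs contract: show handlers return the byte count written into
buf, visibility handlers return a file mode or 0 to hide the attribute. A
hedged sketch of that shape (handler names are illustrative):

	static ssize_t show_sketch(void *data, int type, char *buf)
	{
		return snprintf(buf, 3, "0\n");	/* bytes written */
	}

	static umode_t vis_sketch(void *data, int type)
	{
		return 0444;			/* read-only; 0 would hide it */
	}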
diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
new file mode 100644
index 000000000000..df39b69b366d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
@@ -0,0 +1,210 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef NVM_ISCSI_CFG_H
+#define NVM_ISCSI_CFG_H
+
+#define NUM_OF_ISCSI_TARGET_PER_PF 4 /* Defined as per the
+ * ISCSI IBFT constraint
+ */
+#define NUM_OF_ISCSI_PF_SUPPORTED 4 /* One PF per Port -
+ * assuming 4 port card
+ */
+
+#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256
+
+union nvm_iscsi_dhcp_vendor_id {
+ u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4
+union nvm_iscsi_ipv4_addr {
+ u32 addr;
+ u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN];
+};
+
+#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16
+union nvm_iscsi_ipv6_addr {
+ u32 addr[4];
+ u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN];
+};
+
+struct nvm_iscsi_initiator_ipv4 {
+ union nvm_iscsi_ipv4_addr addr; /* 0x0 */
+ union nvm_iscsi_ipv4_addr subnet_mask; /* 0x4 */
+ union nvm_iscsi_ipv4_addr gateway; /* 0x8 */
+ union nvm_iscsi_ipv4_addr primary_dns; /* 0xC */
+ union nvm_iscsi_ipv4_addr secondary_dns; /* 0x10 */
+ union nvm_iscsi_ipv4_addr dhcp_addr; /* 0x14 */
+
+ union nvm_iscsi_ipv4_addr isns_server; /* 0x18 */
+ union nvm_iscsi_ipv4_addr slp_server; /* 0x1C */
+ union nvm_iscsi_ipv4_addr primary_radius_server; /* 0x20 */
+ union nvm_iscsi_ipv4_addr secondary_radius_server; /* 0x24 */
+
+ union nvm_iscsi_ipv4_addr rsvd[4]; /* 0x28 */
+};
+
+struct nvm_iscsi_initiator_ipv6 {
+ union nvm_iscsi_ipv6_addr addr; /* 0x0 */
+ union nvm_iscsi_ipv6_addr subnet_mask; /* 0x10 */
+ union nvm_iscsi_ipv6_addr gateway; /* 0x20 */
+ union nvm_iscsi_ipv6_addr primary_dns; /* 0x30 */
+ union nvm_iscsi_ipv6_addr secondary_dns; /* 0x40 */
+ union nvm_iscsi_ipv6_addr dhcp_addr; /* 0x50 */
+
+ union nvm_iscsi_ipv6_addr isns_server; /* 0x60 */
+ union nvm_iscsi_ipv6_addr slp_server; /* 0x70 */
+ union nvm_iscsi_ipv6_addr primary_radius_server; /* 0x80 */
+ union nvm_iscsi_ipv6_addr secondary_radius_server; /* 0x90 */
+
+ union nvm_iscsi_ipv6_addr rsvd[3]; /* 0xA0 */
+
+ u32 config; /* 0xD0 */
+#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK 0x000000FF
+#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0
+
+ u32 rsvd_1[3];
+};
+
+#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256
+union nvm_iscsi_name {
+ u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256
+union nvm_iscsi_chap_name {
+ u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16 /* MD5 digest per RFC 1996
+ * is 16 octets
+ */
+union nvm_iscsi_chap_password {
+ u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN];
+};
+
+union nvm_iscsi_lun {
+ u8 byte[8];
+ u32 value[2];
+};
+
+struct nvm_iscsi_generic {
+ u32 ctrl_flags; /* 0x0 */
+#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED BIT(0)
+#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED BIT(1)
+#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED BIT(2)
+#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED BIT(3)
+#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED BIT(4)
+#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN BIT(5)
+#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN BIT(6)
+#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED BIT(7)
+#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED BIT(8)
+
+ u32 timeout; /* 0x4 */
+#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK 0x0000FFFF
+#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET 0
+#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK 0xFFFF0000
+#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET 16
+
+ union nvm_iscsi_dhcp_vendor_id dhcp_vendor_id; /* 0x8 */
+ u32 rsvd[62]; /* 0x108 */
+};
+
+struct nvm_iscsi_initiator {
+ struct nvm_iscsi_initiator_ipv4 ipv4; /* 0x0 */
+ struct nvm_iscsi_initiator_ipv6 ipv6; /* 0x38 */
+
+ union nvm_iscsi_name initiator_name; /* 0x118 */
+ union nvm_iscsi_chap_name chap_name; /* 0x218 */
+ union nvm_iscsi_chap_password chap_password; /* 0x318 */
+
+ u32 generic_cont0; /* 0x398 */
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK 0x0000FFFF
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET 0
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK 0x00030000
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET 16
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4 1
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6 2
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6 3
+
+ u32 ctrl_flags;
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6 BIT(0)
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED BIT(1)
+
+ u32 rsvd[116]; /* 0x32C */
+};
+
+struct nvm_iscsi_target {
+ u32 ctrl_flags; /* 0x0 */
+#define NVM_ISCSI_CFG_TARGET_ENABLED BIT(0)
+#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS BIT(1)
+
+ u32 generic_cont0; /* 0x4 */
+#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK 0x0000FFFF
+#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET 0
+
+ u32 ip_ver;
+#define NVM_ISCSI_CFG_IPv4 4
+#define NVM_ISCSI_CFG_IPv6 6
+
+ u32 rsvd_1[7]; /* 0x24 */
+ union nvm_iscsi_ipv4_addr ipv4_addr; /* 0x28 */
+ union nvm_iscsi_ipv6_addr ipv6_addr; /* 0x2C */
+ union nvm_iscsi_lun lun; /* 0x3C */
+
+ union nvm_iscsi_name target_name; /* 0x44 */
+ union nvm_iscsi_chap_name chap_name; /* 0x144 */
+ union nvm_iscsi_chap_password chap_password; /* 0x244 */
+
+ u32 rsvd_2[107]; /* 0x2C4 */
+};
+
+struct nvm_iscsi_block {
+ u32 id; /* 0x0 */
+#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK 0x0000000F
+#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET 0
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK 0x00000FF0
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET 4
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY BIT(0)
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED BIT(1)
+
+ u32 rsvd_1[5]; /* 0x4 */
+
+ struct nvm_iscsi_generic generic; /* 0x18 */
+ struct nvm_iscsi_initiator initiator; /* 0x218 */
+ struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF];
+ /* 0x718 */
+
+ u32 rsvd_2[58]; /* 0x1718 */
+ /* total size - 0x1800 - 6K block */
+};
+
+struct nvm_iscsi_cfg {
+ u32 id; /* 0x0 */
+#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK 0x000000FF
+#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK 0x0000FF00
+#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK 0xFFFF0000
+#define NVM_ISCSI_CFG_BLK_SIGNATURE 0x49430000 /* IC - Iscsi
+ * Config
+ */
+
+#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0
+#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10
+#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \
+ NVM_ISCSI_CFG_BLK_VERSION_MINOR)
+
+ struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
+};
+
+#endif
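
Nothing in this patch validates the image header, but the masks above make a
sanity check straightforward; a hedged sketch (not part of the commit):

	static bool nvm_cfg_valid_sketch(const struct nvm_iscsi_cfg *cfg)
	{
		return (cfg->id & NVM_ISCSI_CFG_BLK_SIGNATURE_MASK) ==
		       NVM_ISCSI_CFG_BLK_SIGNATURE;
	}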
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2a0173e5d10e..e101cd3043b9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1874,36 +1874,13 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
struct qla_hw_data *ha = vha->hw;
- struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
- struct qla_tgt_cmd *cmd;
- struct se_cmd *se_cmd;
int rc;
- bool found_lun = false;
- unsigned long flags;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
- if (se_cmd->tag == abts->exchange_addr_to_abort) {
- found_lun = true;
- break;
- }
- }
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- /* cmd not in LIO lists, look in qla list */
- if (!found_lun) {
- if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
- /* send TASK_ABORT response immediately */
- qlt_24xx_send_abts_resp(ha->base_qpair, abts,
- FCP_TMF_CMPL, false);
- return 0;
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
- "unable to find cmd in driver or LIO for tag 0x%x\n",
- abts->exchange_addr_to_abort);
- return -ENOENT;
- }
+ if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+ /* send TASK_ABORT response immediately */
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
+ return 0;
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
@@ -1919,14 +1896,17 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
}
memset(mcmd, 0, sizeof(*mcmd));
- cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_ABTS;
mcmd->qpair = ha->base_qpair;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
+ /*
+ * LUN is looked up by target-core internally based on the passed
+ * abts->exchange_addr_to_abort tag.
+ */
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -3747,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
h &= QLA_CMD_HANDLE_MASK;
if (h != QLA_TGT_NULL_HANDLE) {
- if (unlikely(h > req->num_outstanding_cmds)) {
+ if (unlikely(h >= req->num_outstanding_cmds)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index c4b414833b86..b20da0d27ad7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -600,11 +600,13 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
struct fc_port *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
int transl_tmr_func = 0;
+ int flags = TARGET_SCF_ACK_KREF;
switch (tmr_func) {
case QLA_TGT_ABTS:
pr_debug("%ld: ABTS received\n", sess->vha->host_no);
transl_tmr_func = TMR_ABORT_TASK;
+ flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
break;
case QLA_TGT_2G_ABORT_TASK:
pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
@@ -637,7 +639,7 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
}
return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
- transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+ transl_tmr_func, GFP_ATOMIC, tag, flags);
}
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 21225d62b0c1..1e82d4128a84 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -758,8 +758,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
if (hp->dxferp || hp->dxfer_len > 0)
return false;
return true;
- case SG_DXFER_TO_DEV:
case SG_DXFER_FROM_DEV:
+ if (hp->dxfer_len < 0)
+ return false;
+ return true;
+ case SG_DXFER_TO_DEV:
case SG_DXFER_TO_FROM_DEV:
if (!hp->dxferp || hp->dxfer_len == 0)
return false;
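
The sg change splits SG_DXFER_FROM_DEV out of the TO_DEV/TO_FROM_DEV group,
so a read-style request no longer requires a buffer up front. A hedged
userspace sketch of a header the new check accepts (field values are
illustrative):

	#include <scsi/sg.h>
	#include <string.h>

	static void fill_hdr_sketch(sg_io_hdr_t *hdr, void *buf,
				    unsigned int len)
	{
		memset(hdr, 0, sizeof(*hdr));
		hdr->interface_id = 'S';
		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		hdr->dxferp = buf;	/* may now be NULL with len == 0 */
		hdr->dxfer_len = len;
	}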
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8b93197daefe..9be211d68b15 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.eh_timed_out = virtscsi_eh_timed_out,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index e389009fca42..a4e3ae8f0c85 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -915,6 +915,8 @@ static int spinand_probe(struct spi_device *spi_nand)
chip->waitfunc = spinand_wait;
chip->options |= NAND_CACHEPRG;
chip->select_chip = spinand_select_chip;
+ chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
+ chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
mtd = nand_to_mtd(chip);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3fdca2cdd8da..74e4975dd1b1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -488,15 +488,13 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
- bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
-
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node) &&
!(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- __iscsit_free_cmd(cmd, scsi_cmd, true);
+ __iscsit_free_cmd(cmd, true);
}
EXPORT_SYMBOL(iscsit_aborted_task);
@@ -1251,12 +1249,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* execution. These exceptions are processed in CmdSN order using
* iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
*/
- if (cmd->sense_reason) {
- if (cmd->reject_reason)
- return 0;
-
+ if (cmd->sense_reason)
return 1;
- }
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 903b667f8e01..f9bc8ec6fb6b 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -47,18 +47,21 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
}
}
-static void chap_gen_challenge(
+static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
char *c_str,
unsigned int *c_len)
{
+ int ret;
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
- get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ if (unlikely(ret))
+ return ret;
chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
@@ -69,6 +72,7 @@ static void chap_gen_challenge(
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
+ return 0;
}
static int chap_check_algorithm(const char *a_str)
@@ -143,6 +147,7 @@ static struct iscsi_chap *chap_server_open(
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
+ kfree(conn->auth_protocol);
return NULL;
}
@@ -156,7 +161,10 @@ static struct iscsi_chap *chap_server_open(
/*
* Generate Challenge.
*/
- chap_gen_challenge(conn, 1, aic_str, aic_len);
+ if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
+ kfree(conn->auth_protocol);
+ return NULL;
+ }
return chap;
}
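
Both auth and login now use get_random_bytes_wait(), which blocks until the
CRNG is initialized and can fail if the wait is interrupted, hence the new
error paths. A hedged sketch of the propagation pattern (the helper is
hypothetical):

	static int gen_nonce_sketch(unsigned char *buf, int len)
	{
		int ret = get_random_bytes_wait(buf, len);

		if (unlikely(ret))
			return ret;	/* caller unwinds any partial setup */
		return 0;
	}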
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 535a8e06a401..0dd4c45f7575 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -781,6 +781,7 @@ DEF_TPG_ATTRIB(default_erl);
DEF_TPG_ATTRIB(t10_pi);
DEF_TPG_ATTRIB(fabric_prot_type);
DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
@@ -796,6 +797,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_t10_pi,
&iscsi_tpg_attrib_attr_fabric_prot_type,
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+ &iscsi_tpg_attrib_attr_login_keys_workaround,
NULL,
};
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 92b96b51d506..e9bdc8b86e7d 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -245,22 +245,26 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
return 0;
}
-static void iscsi_login_set_conn_values(
+static int iscsi_login_set_conn_values(
struct iscsi_session *sess,
struct iscsi_conn *conn,
__be16 cid)
{
+ int ret;
conn->sess = sess;
conn->cid = be16_to_cpu(cid);
/*
* Generate a random Status sequence number (statsn) for the new
* iSCSI connection.
*/
- get_random_bytes(&conn->stat_sn, sizeof(u32));
+ ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32));
+ if (unlikely(ret))
+ return ret;
mutex_lock(&auth_id_lock);
conn->auth_id = iscsit_global->auth_id++;
mutex_unlock(&auth_id_lock);
+ return 0;
}
__printf(2, 3) int iscsi_change_param_sprintf(
@@ -306,7 +310,11 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- iscsi_login_set_conn_values(sess, conn, pdu->cid);
+ ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
+ if (unlikely(ret)) {
+ kfree(sess);
+ return ret;
+ }
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -497,8 +505,7 @@ static int iscsi_login_non_zero_tsih_s1(
{
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
- iscsi_login_set_conn_values(NULL, conn, pdu->cid);
- return 0;
+ return iscsi_login_set_conn_values(NULL, conn, pdu->cid);
}
/*
@@ -554,9 +561,8 @@ static int iscsi_login_non_zero_tsih_s2(
atomic_set(&sess->session_continuation, 1);
spin_unlock_bh(&sess->conn_lock);
- iscsi_login_set_conn_values(sess, conn, pdu->cid);
-
- if (iscsi_copy_param_list(&conn->param_list,
+ if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 ||
+ iscsi_copy_param_list(&conn->param_list,
conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 6f88b31242b0..7a6751fecd32 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -655,28 +655,6 @@ err:
iscsit_deaccess_np(np, tpg, tpg_np);
}
-static void iscsi_target_do_cleanup(struct work_struct *work)
-{
- struct iscsi_conn *conn = container_of(work,
- struct iscsi_conn, login_cleanup_work.work);
- struct sock *sk = conn->sock->sk;
- struct iscsi_login *login = conn->login;
- struct iscsi_np *np = login->np;
- struct iscsi_portal_group *tpg = conn->tpg;
- struct iscsi_tpg_np *tpg_np = conn->tpg_np;
-
- pr_debug("Entering iscsi_target_do_cleanup\n");
-
- cancel_delayed_work_sync(&conn->login_work);
- conn->orig_state_change(sk);
-
- iscsi_target_restore_sock_callbacks(conn);
- iscsi_target_login_drop(conn, login);
- iscsit_deaccess_np(np, tpg, tpg_np);
-
- pr_debug("iscsi_target_do_cleanup done()\n");
-}
-
static void iscsi_target_sk_state_change(struct sock *sk)
{
struct iscsi_conn *conn;
@@ -886,7 +864,8 @@ static int iscsi_target_handle_csg_zero(
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0)
return -1;
@@ -956,7 +935,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
@@ -1082,7 +1062,6 @@ int iscsi_target_locate_portal(
int sessiontype = 0, ret = 0, tag_num, tag_size;
INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
- INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
iscsi_target_set_sock_callbacks(conn);
login->np = np;
@@ -1331,7 +1310,6 @@ int iscsi_target_start_negotiation(
if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work);
- cancel_delayed_work_sync(&conn->login_cleanup_work);
iscsi_target_restore_sock_callbacks(conn);
iscsi_remove_failed_auth_entry(conn);
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index fce627628200..caab1045742d 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -765,7 +765,8 @@ static int iscsi_check_for_auth_key(char *key)
return 0;
}
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+ bool keys_workaround)
{
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(param->value, NO))
@@ -773,19 +774,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(param->value, YES))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, IMMEDIATEDATA))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, MAXCONNECTIONS))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for Mellanox Flexboot PXE boot ROM
+ */
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_PHASE_DECLARATIVE(param))
SET_PSTATE_REPLY_OPTIONAL(param);
}
@@ -1422,7 +1435,8 @@ int iscsi_encode_text_output(
u8 sender,
char *textbuf,
u32 *length,
- struct iscsi_param_list *param_list)
+ struct iscsi_param_list *param_list,
+ bool keys_workaround)
{
char *output_buf = NULL;
struct iscsi_extra_response *er;
@@ -1458,7 +1472,8 @@ int iscsi_encode_text_output(
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_PROPOSER(param);
- iscsi_check_proposer_for_optional_reply(param);
+ iscsi_check_proposer_for_optional_reply(param,
+ keys_workaround);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 9962ccf0ccd7..c47b73f57528 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -46,7 +46,7 @@ extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
- struct iscsi_param_list *);
+ struct iscsi_param_list *, bool);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 2e7e08dbda48..594d07a1e995 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->t10_pi = TA_DEFAULT_T10_PI;
a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+ a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -311,11 +312,9 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
int ret;
- spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
pr_err("iSCSI target portal group: %hu is already"
" active, ignoring request.\n", tpg->tpgt);
- spin_unlock(&tpg->tpg_state_lock);
return -EINVAL;
}
/*
@@ -324,10 +323,8 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
* is enforced (as per default), and remove the NONE option.
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
- if (!param) {
- spin_unlock(&tpg->tpg_state_lock);
+ if (!param)
return -EINVAL;
- }
if (tpg->tpg_attrib.authentication) {
if (!strcmp(param->value, NONE)) {
@@ -341,6 +338,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
goto err;
}
+ spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_ACTIVE;
spin_unlock(&tpg->tpg_state_lock);
@@ -353,7 +351,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
return 0;
err:
- spin_unlock(&tpg->tpg_state_lock);
return ret;
}
@@ -899,3 +896,21 @@ int iscsit_ta_tpg_enabled_sendtargets(
return 0;
}
+
+int iscsit_ta_login_keys_workaround(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->login_keys_workaround = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+ tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index ceba29851167..59fd3cabe89d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -48,5 +48,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7d3e2fcc26a0..1e36f83b5961 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -167,6 +167,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
cmd->se_cmd.map_tag = tag;
cmd->conn = conn;
+ cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
@@ -711,19 +712,16 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
}
EXPORT_SYMBOL(iscsit_release_cmd);
-void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
- bool check_queues)
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
struct iscsi_conn *conn = cmd->conn;
- if (scsi_cmd) {
- if (cmd->data_direction == DMA_TO_DEVICE) {
- iscsit_stop_dataout_timer(cmd);
- iscsit_free_r2ts_from_list(cmd);
- }
- if (cmd->data_direction == DMA_FROM_DEVICE)
- iscsit_free_all_datain_reqs(cmd);
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ iscsit_stop_dataout_timer(cmd);
+ iscsit_free_r2ts_from_list(cmd);
}
+ if (cmd->data_direction == DMA_FROM_DEVICE)
+ iscsit_free_all_datain_reqs(cmd);
if (conn && check_queues) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
@@ -736,50 +734,18 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
- struct se_cmd *se_cmd = NULL;
+ struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
int rc;
- bool op_scsi = false;
- /*
- * Determine if a struct se_cmd is associated with
- * this struct iscsi_cmd.
- */
- switch (cmd->iscsi_opcode) {
- case ISCSI_OP_SCSI_CMD:
- op_scsi = true;
- /*
- * Fallthrough
- */
- case ISCSI_OP_SCSI_TMFUNC:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, op_scsi, shutdown);
+
+ __iscsit_free_cmd(cmd, shutdown);
+ if (se_cmd) {
rc = transport_generic_free_cmd(se_cmd, shutdown);
if (!rc && shutdown && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ __iscsit_free_cmd(cmd, shutdown);
target_put_sess_cmd(se_cmd);
}
- break;
- case ISCSI_OP_REJECT:
- /*
- * Handle special case for REJECT when iscsi_add_reject*() has
- * overwritten the original iscsi_opcode assignment, and the
- * associated cmd->se_cmd needs to be released.
- */
- if (cmd->se_cmd.se_tfo != NULL) {
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
-
- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
- if (!rc && shutdown && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
- target_put_sess_cmd(se_cmd);
- }
- break;
- }
- /* Fall-through */
- default:
- __iscsit_free_cmd(cmd, false, shutdown);
+ } else {
iscsit_release_cmd(cmd);
- break;
}
}
EXPORT_SYMBOL(iscsit_free_cmd);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 9e4197af8708..425160565d0c 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -37,7 +37,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool);
extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 5091b31b3e56..b6a913e38b30 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -51,19 +51,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd);
*/
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
- /*
- * Do not release struct se_cmd's containing a valid TMR
- * pointer. These will be released directly in tcm_loop_device_reset()
- * with transport_generic_free_cmd().
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- return 0;
- /*
- * Release the struct se_cmd, which will make a callback to release
- * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
- */
- transport_generic_free_cmd(se_cmd, 0);
- return 1;
+ return transport_generic_free_cmd(se_cmd, 0);
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
@@ -218,10 +206,8 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
- struct se_portal_group *se_tpg;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_cmd *tl_cmd = NULL;
- struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;
/*
@@ -240,55 +226,29 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
return ret;
}
- tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
- if (!tl_tmr) {
- pr_err("Unable to allocate memory for tl_tmr\n");
- goto release;
- }
- init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+ init_completion(&tl_cmd->tmr_done);
se_cmd = &tl_cmd->tl_se_cmd;
- se_tpg = &tl_tpg->tl_se_tpg;
se_sess = tl_tpg->tl_nexus->se_sess;
- /*
- * Initialize struct se_cmd descriptor from target_core_mod infrastructure
- */
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, TCM_SIMPLE_TAG,
- &tl_cmd->tl_sense_buf[0]);
- rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
+ rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
+ NULL, tmr, GFP_KERNEL, task,
+ TARGET_SCF_ACK_KREF);
if (rc < 0)
goto release;
+ wait_for_completion(&tl_cmd->tmr_done);
+ ret = se_cmd->se_tmr_req->response;
+ target_put_sess_cmd(se_cmd);
- if (tmr == TMR_ABORT_TASK)
- se_cmd->se_tmr_req->ref_task_tag = task;
+out:
+ return ret;
- /*
- * Locate the underlying TCM struct se_lun
- */
- if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
- ret = TMR_LUN_DOES_NOT_EXIST;
- goto release;
- }
- /*
- * Queue the TMR to TCM Core and sleep waiting for
- * tcm_loop_queue_tm_rsp() to wake us up.
- */
- transport_generic_handle_tmr(se_cmd);
- wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
- /*
- * The TMR LUN_RESET has completed, check the response status and
- * then release allocations.
- */
- ret = se_cmd->se_tmr_req->response;
release:
if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1);
+ transport_generic_free_cmd(se_cmd, 0);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
- kfree(tl_tmr);
- return ret;
+ goto out;
}
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
@@ -669,14 +629,11 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
- struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
- /*
- * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
- * and wake up the wait_queue_head_t in tcm_loop_device_reset()
- */
- atomic_set(&tl_tmr->tmr_complete, 1);
- wake_up(&tl_tmr->tl_tmr_wait);
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ /* Wake up tcm_loop_issue_tmr(). */
+ complete(&tl_cmd->tmr_done);
}
static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index a8a230b4e6b5..3acc43c05117 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,15 +16,11 @@ struct tcm_loop_cmd {
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tl_se_cmd;
struct work_struct work;
+ struct completion tmr_done;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
};
-struct tcm_loop_tmr {
- atomic_t tmr_complete;
- wait_queue_head_t tl_tmr_wait;
-};
-
struct tcm_loop_nexus {
/*
* Pointer to TCM session for I_T Nexus
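
tcm_loop replaces the open-coded atomic flag plus waitqueue with a struct
completion, which packages the same handshake. A hedged sketch of the two
sides (the context struct and function names are illustrative):

	#include <linux/completion.h>

	struct tmr_ctx_sketch { struct completion done; };

	static void issue_side_sketch(struct tmr_ctx_sketch *c)
	{
		init_completion(&c->done);
		/* ...submit the TMR... */
		wait_for_completion(&c->done);
	}

	static void response_side_sketch(struct tmr_ctx_sketch *c)
	{
		complete(&c->done);	/* replaces atomic_set + wake_up */
	}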
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fc4a9c303d55..a91b7c25ffd4 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -205,8 +205,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* TARGET PORT GROUP
*/
- buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
- buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+ put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
+ off += 2;
off++; /* Skip over Reserved */
/*
@@ -235,8 +235,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set RELATIVE TARGET PORT IDENTIFIER
*/
- buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
- buf[off++] = (lun->lun_rtpi & 0xff);
+ put_unaligned_be16(lun->lun_rtpi, &buf[off]);
+ off += 2;
rd_len += 4;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
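
put_unaligned_be16() emits the same two big-endian bytes as the shift/mask
pair it replaces, without assuming buffer alignment. A hedged one-line
sketch:

	#include <asm/unaligned.h>

	static void encode_be16_sketch(u8 *buf, u16 v)
	{
		put_unaligned_be16(v, buf);
		/* same bytes as: buf[0] = (v >> 8) & 0xff; buf[1] = v & 0xff; */
	}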
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0326607e5ab8..7e87d952bb7a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1085,6 +1085,24 @@ static ssize_t block_size_store(struct config_item *item,
return count;
}
+static ssize_t alua_support_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ u8 flags = da->da_dev->transport->transport_flags;
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
+}
+
+static ssize_t pgr_support_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ u8 flags = da->da_dev->transport->transport_flags;
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
+}
+
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
@@ -1116,6 +1134,8 @@ CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
+CONFIGFS_ATTR_RO(, alua_support);
+CONFIGFS_ATTR_RO(, pgr_support);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1154,6 +1174,8 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_unmap_granularity_alignment,
&attr_unmap_zeroes_data,
&attr_max_write_same_len,
+ &attr_alua_support,
+ &attr_pgr_support,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -1168,6 +1190,8 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
&attr_hw_block_size,
&attr_hw_max_sectors,
&attr_hw_queue_depth,
+ &attr_alua_support,
+ &attr_pgr_support,
NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
@@ -2236,7 +2260,11 @@ static void target_core_dev_release(struct config_item *item)
target_free_device(dev);
}
-static struct configfs_item_operations target_core_dev_item_ops = {
+/*
+ * Used in target_core_fabric_configfs.c to verify valid se_device symlink
+ * within target_fabric_port_link()
+ */
+struct configfs_item_operations target_core_dev_item_ops = {
.release = target_core_dev_release,
};
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8add07f387f9..e8dd6da164b2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -49,8 +49,9 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-DEFINE_MUTEX(g_device_mutex);
-LIST_HEAD(g_device_list);
+static DEFINE_MUTEX(device_mutex);
+static LIST_HEAD(device_list);
+static DEFINE_IDR(devices_idr);
static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
@@ -168,11 +169,20 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, unpacked_lun);
if (deve) {
- se_cmd->se_lun = rcu_dereference(deve->se_lun);
se_lun = rcu_dereference(deve->se_lun);
+
+ if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+ se_lun = NULL;
+ goto out_unlock;
+ }
+
+ se_cmd->se_lun = rcu_dereference(deve->se_lun);
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ se_cmd->lun_ref_active = true;
}
+out_unlock:
rcu_read_unlock();
if (!se_lun) {
@@ -182,9 +192,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
unpacked_lun);
return -ENODEV;
}
- /*
- * XXX: Add percpu se_lun->lun_ref reference count for TMR
- */
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
@@ -756,19 +763,16 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
if (!dev)
return NULL;
- dev->dev_link_magic = SE_DEV_LINK_MAGIC;
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
- INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
- INIT_LIST_HEAD(&dev->g_dev_node);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
@@ -851,7 +855,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size;
- attrib->unmap_zeroes_data = 0;
+ attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
@@ -875,10 +879,79 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
}
EXPORT_SYMBOL(target_to_linux_sector);
+/**
+ * target_find_device - find a se_device by its dev_index
+ * @id: dev_index
+ * @do_depend: true if the caller needs target_depend_item() to be called
+ *
+ * If do_depend is true, the caller must call target_undepend_item()
+ * when finished using the device.
+ *
+ * If do_depend is false, the caller must be running from a configfs
+ * callback or during device removal.
+ */
+struct se_device *target_find_device(int id, bool do_depend)
+{
+ struct se_device *dev;
+
+ mutex_lock(&device_mutex);
+ dev = idr_find(&devices_idr, id);
+ if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
+ dev = NULL;
+ mutex_unlock(&device_mutex);
+ return dev;
+}
+EXPORT_SYMBOL(target_find_device);
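(Illustrative sketch, not part of the patch: a caller outside configfs
context pairs the lookup with target_undepend_item(), as the tcmu
netlink handler later in this diff does; the function name and error
handling below are assumptions.)

	/* Hedged usage sketch for target_find_device(). */
	static int example_use_device(int dev_id)
	{
		struct se_device *dev;

		/* do_depend=true pins the device against configfs removal */
		dev = target_find_device(dev_id, true);
		if (!dev)
			return -ENODEV;

		/* ... use dev ... */

		target_undepend_item(&dev->dev_group.cg_item);
		return 0;
	}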
+
+struct devices_idr_iter {
+ int (*fn)(struct se_device *dev, void *data);
+ void *data;
+};
+
+static int target_devices_idr_iter(int id, void *p, void *data)
+{
+ struct devices_idr_iter *iter = data;
+ struct se_device *dev = p;
+
+ /*
+ * We add the device early to the idr, so it can be used
+ * by backend modules during configuration. We do not want
+ * to allow other callers to access partially set up devices,
+ * so we skip them here.
+ */
+ if (!(dev->dev_flags & DF_CONFIGURED))
+ return 0;
+
+ return iter->fn(dev, iter->data);
+}
+
+/**
+ * target_for_each_device - iterate over configured devices
+ * @fn: iterator function
+ * @data: pointer to data that will be passed to fn
+ *
+ * @fn must return 0 to continue iterating over devices. A non-zero
+ * return value breaks out of the loop and is passed back to the caller.
+ */
+int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+ void *data)
+{
+ struct devices_idr_iter iter;
+ int ret;
+
+ iter.fn = fn;
+ iter.data = data;
+
+ mutex_lock(&device_mutex);
+ ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
+ mutex_unlock(&device_mutex);
+ return ret;
+}
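(For illustration only: a callback obeying the zero/non-zero contract
described above; example_count_dev is a hypothetical name.)

	/* Sketch: count configured devices via the new iterator. */
	static int example_count_dev(struct se_device *dev, void *data)
	{
		(*(int *)data)++;
		return 0;	/* zero means keep iterating */
	}

	int count = 0;

	target_for_each_device(example_count_dev, &count);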
+
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
- int ret;
+ int ret, id;
if (dev->dev_flags & DF_CONFIGURED) {
pr_err("se_dev->se_dev_ptr already set for storage"
@@ -886,9 +959,26 @@ int target_configure_device(struct se_device *dev)
return -EEXIST;
}
+ /*
+ * Add the device to the idr early so backend modules like tcmu
+ * can use it during their configuration.
+ */
+ mutex_lock(&device_mutex);
+ /*
+ * Use cyclic allocation to try to avoid reusing the ids of
+ * devices that were recently removed.
+ */
+ id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
+ mutex_unlock(&device_mutex);
+ if (id < 0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev->dev_index = id;
+
ret = dev->transport->configure_device(dev);
if (ret)
- goto out;
+ goto out_free_index;
/*
* XXX: there is not much point to have two different values here..
*/
@@ -903,12 +993,11 @@ int target_configure_device(struct se_device *dev)
dev->dev_attrib.hw_block_size);
dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
- dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
ret = core_setup_alua(dev);
if (ret)
- goto out;
+ goto out_free_index;
/*
* Startup the struct se_device processing thread
@@ -946,16 +1035,16 @@ int target_configure_device(struct se_device *dev)
hba->dev_count++;
spin_unlock(&hba->device_lock);
- mutex_lock(&g_device_mutex);
- list_add_tail(&dev->g_dev_node, &g_device_list);
- mutex_unlock(&g_device_mutex);
-
dev->dev_flags |= DF_CONFIGURED;
return 0;
out_free_alua:
core_alua_free_lu_gp_mem(dev);
+out_free_index:
+ mutex_lock(&device_mutex);
+ idr_remove(&devices_idr, dev->dev_index);
+ mutex_unlock(&device_mutex);
out:
se_release_vpd_for_dev(dev);
return ret;
@@ -970,9 +1059,11 @@ void target_free_device(struct se_device *dev)
if (dev->dev_flags & DF_CONFIGURED) {
destroy_workqueue(dev->tmr_wq);
- mutex_lock(&g_device_mutex);
- list_del(&dev->g_dev_node);
- mutex_unlock(&g_device_mutex);
+ dev->transport->destroy_device(dev);
+
+ mutex_lock(&device_mutex);
+ idr_remove(&devices_idr, dev->dev_index);
+ mutex_unlock(&device_mutex);
spin_lock(&hba->device_lock);
hba->dev_count--;
@@ -1087,19 +1178,19 @@ passthrough_parse_cdb(struct se_cmd *cmd,
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
if (cdb[0] == PERSISTENT_RESERVE_IN) {
cmd->execute_cmd = target_scsi3_emulate_pr_in;
- size = (cdb[7] << 8) + cdb[8];
+ size = get_unaligned_be16(&cdb[7]);
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == PERSISTENT_RESERVE_OUT) {
cmd->execute_cmd = target_scsi3_emulate_pr_out;
- size = (cdb[7] << 8) + cdb[8];
+ size = get_unaligned_be32(&cdb[5]);
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
- size = (cdb[7] << 8) | cdb[8];
+ size = get_unaligned_be16(&cdb[7]);
else
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
@@ -1107,7 +1198,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
- size = (cdb[7] << 8) | cdb[8];
+ size = get_unaligned_be16(&cdb[7]);
else
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
@@ -1126,7 +1217,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
case WRITE_16:
case WRITE_VERIFY:
case WRITE_VERIFY_12:
- case 0x8e: /* WRITE_VERIFY_16 */
+ case WRITE_VERIFY_16:
case COMPARE_AND_WRITE:
case XDWRITEREAD_10:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -1135,7 +1226,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
switch (get_unaligned_be16(&cdb[8])) {
case READ_32:
case WRITE_32:
- case 0x0c: /* WRITE_VERIFY_32 */
+ case WRITE_VERIFY_32:
case XDWRITEREAD_32:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d1e6cab8e3d3..e9e917cc6441 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -65,6 +65,8 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
+static struct configfs_item_operations target_fabric_port_item_ops;
+
/* Start of tfc_tpg_mappedlun_cit */
static int target_fabric_mappedlun_link(
@@ -72,19 +74,20 @@ static int target_fabric_mappedlun_link(
struct config_item *lun_ci)
{
struct se_dev_entry *deve;
- struct se_lun *lun = container_of(to_config_group(lun_ci),
- struct se_lun, lun_group);
+ struct se_lun *lun;
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
bool lun_access_ro;
- if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
- pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
- " %p to struct lun: %p\n", lun_ci, lun);
+ if (!lun_ci->ci_type ||
+ lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) {
+ pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci);
return -EFAULT;
}
+ lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+
/*
* Ensure that the source port exists
*/
@@ -620,6 +623,8 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
NULL,
};
+extern struct configfs_item_operations target_core_dev_item_ops;
+
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
@@ -628,16 +633,16 @@ static int target_fabric_port_link(
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_portal_group *se_tpg;
- struct se_device *dev =
- container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
+ struct se_device *dev;
struct target_fabric_configfs *tf;
int ret;
- if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
- pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
- " %p to struct se_device: %p\n", se_dev_ci, dev);
+ if (!se_dev_ci->ci_type ||
+ se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) {
+ pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci);
return -EFAULT;
}
+ dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
if (!(dev->dev_flags & DF_CONFIGURED)) {
pr_err("se_device not configured yet, cannot port link\n");
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index cb6497ce4b61..508da345b73f 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,6 +34,7 @@
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/export.h>
+#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
@@ -216,8 +217,7 @@ static int iscsi_get_pr_transport_id(
if (padding != 0)
len += padding;
- buf[2] = ((len >> 8) & 0xff);
- buf[3] = (len & 0xff);
+ put_unaligned_be16(len, &buf[2]);
/*
* Increment value for total payload + header length for
* full status descriptor
@@ -306,7 +306,7 @@ static char *iscsi_parse_pr_out_transport_id(
*/
if (out_tid_len) {
/* ADDITIONAL LENGTH is a big-endian u16 at bytes 2-3 */
- add_len = (buf[2] << 8) | buf[3];
+ add_len = get_unaligned_be16(&buf[2]);
tid_len = strlen(&buf[4]);
tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
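(Editorial aside: the unaligned helpers used throughout this series are
exact drop-in replacements for the open-coded shifts; a minimal sanity
sketch, with tmp and len as local illustrations:)

	u16 len = 0x1234;
	u8 tmp[2];

	/* same as: tmp[0] = (len >> 8) & 0xff; tmp[1] = len & 0xff; */
	put_unaligned_be16(len, tmp);
	/* same as: (tmp[0] << 8) | tmp[1] */
	WARN_ON(get_unaligned_be16(tmp) != len);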
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e921948415c7..24cf11d9e50a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -237,13 +237,17 @@ static void fd_dev_call_rcu(struct rcu_head *p)
static void fd_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, fd_dev_call_rcu);
+}
+
+static void fd_destroy_device(struct se_device *dev)
+{
struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
- call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}
static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
@@ -826,6 +830,7 @@ static const struct target_backend_ops fileio_ops = {
.detach_hba = fd_detach_hba,
.alloc_device = fd_alloc_device,
.configure_device = fd_configure_device,
+ .destroy_device = fd_destroy_device,
.free_device = fd_free_device,
.parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c05d38016556..ee7c7fa55dad 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -86,6 +86,7 @@ static int iblock_configure_device(struct se_device *dev)
struct block_device *bd = NULL;
struct blk_integrity *bi;
fmode_t mode;
+ unsigned int max_write_zeroes_sectors;
int ret = -ENOMEM;
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
@@ -129,7 +130,11 @@ static int iblock_configure_device(struct se_device *dev)
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
*/
- dev->dev_attrib.max_write_same_len = 0xFFFF;
+ max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
+ if (max_write_zeroes_sectors)
+ dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
+ else
+ dev->dev_attrib.max_write_same_len = 0xFFFF;
if (blk_queue_nonrot(q))
dev->dev_attrib.is_nonrot = 1;
@@ -185,14 +190,17 @@ static void iblock_dev_call_rcu(struct rcu_head *p)
static void iblock_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
+}
+
+static void iblock_destroy_device(struct se_device *dev)
+{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
if (ib_dev->ibd_bio_set != NULL)
bioset_free(ib_dev->ibd_bio_set);
-
- call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -415,28 +423,31 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
}
static sense_reason_t
-iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
+iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *sg = &cmd->t_data_sg[0];
- struct page *page = NULL;
- int ret;
+ unsigned char *buf, zero = 0x00, *p = &zero;
+ int rc, ret;
- if (sg->offset) {
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return TCM_OUT_OF_RESOURCES;
- sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
- dev->dev_attrib.block_size);
- }
+ buf = kmap(sg_page(sg)) + sg->offset;
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ /*
+ * Fall back to block_execute_write_same() slow-path if
+ * incoming WRITE_SAME payload does not contain zeros.
+ */
+ rc = memcmp(buf, p, cmd->data_length);
+ kunmap(sg_page(sg));
- ret = blkdev_issue_write_same(bdev,
+ if (rc)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ ret = blkdev_issue_zeroout(bdev,
target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd)),
- GFP_KERNEL, page ? page : sg_page(sg));
- if (page)
- __free_page(page);
+ GFP_KERNEL, false);
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -472,8 +483,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
- if (bdev_write_same(bdev))
- return iblock_execute_write_same_direct(bdev, cmd);
+ if (bdev_write_zeroes_sectors(bdev)) {
+ if (!iblock_execute_zero_out(bdev, cmd))
+ return 0;
+ }
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
@@ -848,6 +861,7 @@ static const struct target_backend_ops iblock_ops = {
.detach_hba = iblock_detach_hba,
.alloc_device = iblock_alloc_device,
.configure_device = iblock_configure_device,
+ .destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
.parse_cdb = iblock_parse_cdb,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0912de7c0cf8..f30e8ac13386 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -56,9 +56,6 @@ struct target_fabric_configfs {
extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_device.c */
-extern struct mutex g_device_mutex;
-extern struct list_head g_device_list;
-
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
void target_pr_kref_release(struct kref *);
@@ -87,6 +84,8 @@ void core_dev_release_virtual_lun0(void);
struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
int target_configure_device(struct se_device *dev);
void target_free_device(struct se_device *);
+int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+ void *data);
/* target_core_configfs.c */
void target_setup_backend_cits(struct target_backend *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 129ca572673c..6d5def64db61 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1562,10 +1562,7 @@ core_scsi3_decode_spec_i_port(
* first extract TransportID Parameter Data Length, and make sure
* the value matches up to the SCSI expected data transfer length.
*/
- tpdl = (buf[24] & 0xff) << 24;
- tpdl |= (buf[25] & 0xff) << 16;
- tpdl |= (buf[26] & 0xff) << 8;
- tpdl |= buf[27] & 0xff;
+ tpdl = get_unaligned_be32(&buf[24]);
if ((tpdl + 28) != cmd->data_length) {
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
@@ -3221,12 +3218,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
goto out_put_pr_reg;
}
- rtpi = (buf[18] & 0xff) << 8;
- rtpi |= buf[19] & 0xff;
- tid_len = (buf[20] & 0xff) << 24;
- tid_len |= (buf[21] & 0xff) << 16;
- tid_len |= (buf[22] & 0xff) << 8;
- tid_len |= buf[23] & 0xff;
+ rtpi = get_unaligned_be16(&buf[18]);
+ tid_len = get_unaligned_be32(&buf[20]);
transport_kunmap_data_sg(cmd);
buf = NULL;
@@ -3552,16 +3545,6 @@ out_put_pr_reg:
return ret;
}
-static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
- __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
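(Editorial note: the removed helper built the u64 from two big-endian
32-bit halves; get_unaligned_be64() performs the identical load:)

	/* ((u64)__v1 << 32) | __v2 == big-endian 64-bit read at buf[0] */
	res_key = get_unaligned_be64(&buf[0]);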
/*
* See spc4r17 section 6.14 Table 170
*/
@@ -3602,7 +3585,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
/*
@@ -3619,8 +3602,8 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
- res_key = core_scsi3_extract_reservation_key(&buf[0]);
- sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+ res_key = get_unaligned_be64(&buf[0]);
+ sa_res_key = get_unaligned_be64(&buf[8]);
/*
* REGISTER_AND_MOVE uses a different SA parameter list containing
* SCSI TransportIDs.
@@ -3646,7 +3629,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
- if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ if (spec_i_pt && (sa != PRO_REGISTER))
return TCM_INVALID_PARAMETER_LIST;
/*
@@ -3658,11 +3641,11 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
* the sense key set to ILLEGAL REQUEST, and the additional sense
* code set to PARAMETER LIST LENGTH ERROR.
*/
- if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) &&
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
/*
@@ -3702,7 +3685,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
break;
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
- " action: 0x%02x\n", cdb[1] & 0x1f);
+ " action: 0x%02x\n", sa);
return TCM_INVALID_CDB_FIELD;
}
@@ -3734,10 +3717,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ put_unaligned_be32(dev->t10_pr.pr_generation, buf);
spin_lock(&dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
@@ -3749,23 +3729,13 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
if ((add_len + 8) > (cmd->data_length - 8))
break;
- buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
- buf[off++] = (pr_reg->pr_res_key & 0xff);
-
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
add_len += 8;
}
spin_unlock(&dev->t10_pr.registration_lock);
- buf[4] = ((add_len >> 24) & 0xff);
- buf[5] = ((add_len >> 16) & 0xff);
- buf[6] = ((add_len >> 8) & 0xff);
- buf[7] = (add_len & 0xff);
+ put_unaligned_be32(add_len, &buf[4]);
transport_kunmap_data_sg(cmd);
@@ -3796,10 +3766,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
@@ -3807,10 +3774,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
/*
* Set the hardcoded Additional Length
*/
- buf[4] = ((add_len >> 24) & 0xff);
- buf[5] = ((add_len >> 16) & 0xff);
- buf[6] = ((add_len >> 8) & 0xff);
- buf[7] = (add_len & 0xff);
+ put_unaligned_be32(add_len, &buf[4]);
if (cmd->data_length < 22)
goto err;
@@ -3837,14 +3801,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
else
pr_res_key = pr_reg->pr_res_key;
- buf[8] = ((pr_res_key >> 56) & 0xff);
- buf[9] = ((pr_res_key >> 48) & 0xff);
- buf[10] = ((pr_res_key >> 40) & 0xff);
- buf[11] = ((pr_res_key >> 32) & 0xff);
- buf[12] = ((pr_res_key >> 24) & 0xff);
- buf[13] = ((pr_res_key >> 16) & 0xff);
- buf[14] = ((pr_res_key >> 8) & 0xff);
- buf[15] = (pr_res_key & 0xff);
+ put_unaligned_be64(pr_res_key, &buf[8]);
/*
* Set the SCOPE and TYPE
*/
@@ -3882,8 +3839,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((add_len >> 8) & 0xff);
- buf[1] = (add_len & 0xff);
+ put_unaligned_be16(add_len, &buf[0]);
buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
@@ -3947,10 +3903,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_pr_res_holder) {
@@ -3992,14 +3945,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Set RESERVATION KEY
*/
- buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
- buf[off++] = (pr_reg->pr_res_key & 0xff);
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
off += 4; /* Skip Over Reserved area */
/*
@@ -4041,8 +3988,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (!pr_reg->pr_reg_all_tg_pt) {
u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
- buf[off++] = ((sep_rtpi >> 8) & 0xff);
- buf[off++] = (sep_rtpi & 0xff);
+ put_unaligned_be16(sep_rtpi, &buf[off]);
+ off += 2;
} else
off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
@@ -4062,10 +4009,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
- buf[off++] = ((desc_len >> 24) & 0xff);
- buf[off++] = ((desc_len >> 16) & 0xff);
- buf[off++] = ((desc_len >> 8) & 0xff);
- buf[off++] = (desc_len & 0xff);
+ put_unaligned_be32(desc_len, &buf[off]);
/*
* Size of full descriptor header minus TransportID
* containing ($FABRIC_MOD specific) initiator device/port
@@ -4082,10 +4026,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Set ADDITIONAL_LENGTH
*/
- buf[4] = ((add_len >> 24) & 0xff);
- buf[5] = ((add_len >> 16) & 0xff);
- buf[6] = ((add_len >> 8) & 0xff);
- buf[7] = (add_len & 0xff);
+ put_unaligned_be32(add_len, &buf[4]);
transport_kunmap_data_sg(cmd);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ceec0211e84e..7c69b4a9694d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -168,7 +168,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
/*
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
- sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+ sdev->sector_size = get_unaligned_be24(&buf[9]);
out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
@@ -209,8 +209,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x80; /* Unit Serial Number */
- cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
- cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+ put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
@@ -245,8 +244,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x83; /* Device Identifier */
- cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
- cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+ put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
@@ -254,7 +252,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
if (ret)
goto out;
- page_len = (buf[2] << 8) | buf[3];
+ page_len = get_unaligned_be16(&buf[2]);
while (page_len > 0) {
/* Grab a pointer to the Identification descriptor */
page_83 = &buf[off];
@@ -384,7 +382,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
- * for TYPE_DISK using supplied udev_path
+ * for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
bd = blkdev_get_by_path(dev->udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
@@ -402,8 +400,9 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
return ret;
}
- pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n",
- phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
+ phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
+ sh->host_no, sd->channel, sd->id, sd->lun);
return 0;
}
@@ -522,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
*/
switch (sd->type) {
case TYPE_DISK:
+ case TYPE_ZBC:
ret = pscsi_create_type_disk(dev, sd);
break;
default:
@@ -566,6 +566,11 @@ static void pscsi_dev_call_rcu(struct rcu_head *p)
static void pscsi_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
+}
+
+static void pscsi_destroy_device(struct se_device *dev)
+{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
@@ -573,9 +578,11 @@ static void pscsi_free_device(struct se_device *dev)
if (sd) {
/*
* Release exclusive pSCSI internal struct block_device claim for
- * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+ * struct scsi_device with TYPE_DISK or TYPE_ZBC
+ * from pscsi_create_type_disk()
*/
- if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+ if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
+ pdv->pdv_bd) {
blkdev_put(pdv->pdv_bd,
FMODE_WRITE|FMODE_READ|FMODE_EXCL);
pdv->pdv_bd = NULL;
@@ -594,15 +601,13 @@ static void pscsi_free_device(struct se_device *dev)
pdv->pdv_sd = NULL;
}
- call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
}
-static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
- unsigned char *sense_buffer)
+static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
+ unsigned char *req_sense)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
- int result;
struct pscsi_plugin_task *pt = cmd->priv;
unsigned char *cdb;
/*
@@ -613,7 +618,6 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
return;
cdb = &pt->pscsi_cdb[0];
- result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
@@ -622,7 +626,7 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
goto after_mode_sense;
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
- (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ scsi_status == SAM_STAT_GOOD) {
bool read_only = target_lun_is_rdonly(cmd);
if (read_only) {
@@ -657,40 +661,36 @@ after_mode_sense:
* storage engine.
*/
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
- (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ scsi_status == SAM_STAT_GOOD) {
unsigned char *buf;
u16 bdl;
u32 blocksize;
- buf = sg_virt(&sg[0]);
+ buf = sg_virt(&cmd->t_data_sg[0]);
if (!buf) {
pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
if (cdb[0] == MODE_SELECT)
- bdl = (buf[3]);
+ bdl = buf[3];
else
- bdl = (buf[6] << 8) | (buf[7]);
+ bdl = get_unaligned_be16(&buf[6]);
if (!bdl)
goto after_mode_select;
if (cdb[0] == MODE_SELECT)
- blocksize = (buf[9] << 16) | (buf[10] << 8) |
- (buf[11]);
+ blocksize = get_unaligned_be24(&buf[9]);
else
- blocksize = (buf[13] << 16) | (buf[14] << 8) |
- (buf[15]);
+ blocksize = get_unaligned_be24(&buf[13]);
sd->sector_size = blocksize;
}
after_mode_select:
- if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
- memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
- cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
- }
+ if (scsi_status == SAM_STAT_CHECK_CONDITION)
+ transport_copy_sense_to_cmd(cmd, req_sense);
}
enum {
@@ -1002,7 +1002,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->end_io_data = cmd;
scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
scsi_req(req)->cmd = &pt->pscsi_cdb[0];
- if (pdv->pdv_sd->type == TYPE_DISK)
+ if (pdv->pdv_sd->type == TYPE_DISK ||
+ pdv->pdv_sd->type == TYPE_ZBC)
req->timeout = PS_TIMEOUT_DISK;
else
req->timeout = PS_TIMEOUT_OTHER;
@@ -1047,30 +1048,29 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
struct pscsi_plugin_task *pt = cmd->priv;
+ int result = scsi_req(req)->result;
+ u8 scsi_status = status_byte(result) << 1;
- pt->pscsi_result = scsi_req(req)->result;
- pt->pscsi_resid = scsi_req(req)->resid_len;
-
- cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
- if (cmd->scsi_status) {
+ if (scsi_status) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- pt->pscsi_result);
+ result);
}
- switch (host_byte(pt->pscsi_result)) {
+ pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);
+
+ switch (host_byte(result)) {
case DID_OK:
- target_complete_cmd(cmd, cmd->scsi_status);
+ target_complete_cmd(cmd, scsi_status);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- pt->pscsi_result);
+ result);
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
- memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
__blk_put_request(req->q, req);
kfree(pt);
}
@@ -1086,8 +1086,8 @@ static const struct target_backend_ops pscsi_ops = {
.pmode_enable_hba = pscsi_pmode_enable_hba,
.alloc_device = pscsi_alloc_device,
.configure_device = pscsi_configure_device,
+ .destroy_device = pscsi_destroy_device,
.free_device = pscsi_free_device,
- .transport_complete = pscsi_transport_complete,
.parse_cdb = pscsi_parse_cdb,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 8a02fa47c7e8..b86fb0e1b783 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -23,10 +23,6 @@ struct scsi_device;
struct Scsi_Host;
struct pscsi_plugin_task {
- unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
- int pscsi_direction;
- int pscsi_result;
- u32 pscsi_resid;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 20253d04103f..a6e8106abd6f 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -339,10 +339,14 @@ static void rd_dev_call_rcu(struct rcu_head *p)
static void rd_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, rd_dev_call_rcu);
+}
+
+static void rd_destroy_device(struct se_device *dev)
+{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
- call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
@@ -554,7 +558,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -589,7 +593,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
}
kfree(orig);
- return (!ret) ? count : ret;
+ return count;
}
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
@@ -651,6 +655,7 @@ static const struct target_backend_ops rd_mcp_ops = {
.detach_hba = rd_detach_hba,
.alloc_device = rd_alloc_device,
.configure_device = rd_configure_device,
+ .destroy_device = rd_destroy_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index dc9456e7dac9..750a04ed0e93 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -71,14 +71,8 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
else
blocks = (u32)blocks_long;
- buf[0] = (blocks >> 24) & 0xff;
- buf[1] = (blocks >> 16) & 0xff;
- buf[2] = (blocks >> 8) & 0xff;
- buf[3] = blocks & 0xff;
- buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->dev_attrib.block_size & 0xff;
+ put_unaligned_be32(blocks, &buf[0]);
+ put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
@@ -102,18 +96,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
unsigned long long blocks = dev->transport->get_blocks(dev);
memset(buf, 0, sizeof(buf));
- buf[0] = (blocks >> 56) & 0xff;
- buf[1] = (blocks >> 48) & 0xff;
- buf[2] = (blocks >> 40) & 0xff;
- buf[3] = (blocks >> 32) & 0xff;
- buf[4] = (blocks >> 24) & 0xff;
- buf[5] = (blocks >> 16) & 0xff;
- buf[6] = (blocks >> 8) & 0xff;
- buf[7] = blocks & 0xff;
- buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->dev_attrib.block_size & 0xff;
+ put_unaligned_be64(blocks, &buf[0]);
+ put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
/*
* Set P_TYPE and PROT_EN bits for DIF support
*/
@@ -134,8 +118,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
if (dev->transport->get_alignment_offset_lbas) {
u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
- buf[14] = (lalba >> 8) & 0x3f;
- buf[15] = lalba & 0xff;
+
+ put_unaligned_be16(lalba, &buf[14]);
}
/*
@@ -262,18 +246,17 @@ static inline u32 transport_get_sectors_6(unsigned char *cdb)
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
- return (u32)(cdb[7] << 8) + cdb[8];
+ return get_unaligned_be16(&cdb[7]);
}
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
- return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+ return get_unaligned_be32(&cdb[6]);
}
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
- return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
- (cdb[12] << 8) + cdb[13];
+ return get_unaligned_be32(&cdb[10]);
}
/*
@@ -281,29 +264,23 @@ static inline u32 transport_get_sectors_16(unsigned char *cdb)
*/
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
- return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
- (cdb[30] << 8) + cdb[31];
+ return get_unaligned_be32(&cdb[28]);
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
- return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+ return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
- return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ return get_unaligned_be32(&cdb[2]);
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
- __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+ return get_unaligned_be64(&cdb[2]);
}
/*
@@ -311,12 +288,7 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb)
*/
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
- __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+ return get_unaligned_be64(&cdb[12]);
}
static sense_reason_t
@@ -1005,6 +977,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
break;
}
case COMPARE_AND_WRITE:
+ if (!dev->dev_attrib.emulate_caw) {
+ pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
+ " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
+ dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
sectors = cdb[13];
/*
* Currently enforce COMPARE_AND_WRITE for a single sector
@@ -1045,8 +1023,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
cmd->t_task_cdb[1] & 0x1f);
return TCM_INVALID_CDB_FIELD;
}
- size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
+ size = get_unaligned_be32(&cdb[10]);
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 2a91ed3ef380..cb0461a10808 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -287,8 +287,8 @@ check_t10_vend_desc:
/* Skip over Obsolete field in RTPI payload
* in Table 472 */
off += 2;
- buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
- buf[off++] = (lun->lun_rtpi & 0xff);
+ put_unaligned_be16(lun->lun_rtpi, &buf[off]);
+ off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* Target port group identifier, see spc4r17
@@ -316,8 +316,8 @@ check_t10_vend_desc:
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
- buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
- buf[off++] = (tg_pt_gp_id & 0xff);
+ put_unaligned_be16(tg_pt_gp_id, &buf[off]);
+ off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* Logical Unit Group identifier, see spc4r17
@@ -343,8 +343,8 @@ check_lu_gp:
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
- buf[off++] = ((lu_gp_id >> 8) & 0xff);
- buf[off++] = (lu_gp_id & 0xff);
+ put_unaligned_be16(lu_gp_id, &buf[off]);
+ off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* SCSI name string designator, see spc4r17
@@ -431,8 +431,7 @@ check_scsi_name:
/* Header size + Designation descriptor */
len += (scsi_target_len + 4);
}
- buf[2] = ((len >> 8) & 0xff);
- buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+ put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);
@@ -1288,7 +1287,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SELECT_10:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SENSE:
@@ -1296,25 +1295,25 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_modesense;
break;
case MODE_SENSE_10:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = spc_emulate_modesense;
break;
case LOG_SELECT:
case LOG_SENSE:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
break;
case PERSISTENT_RESERVE_IN:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = target_scsi3_emulate_pr_in;
break;
case PERSISTENT_RESERVE_OUT:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be32(&cdb[5]);
cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
case RELEASE:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
- *size = (cdb[7] << 8) | cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
else
*size = cmd->data_length;
@@ -1327,7 +1326,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Assume the passthrough or $FABRIC_MOD will tell us about it.
*/
if (cdb[0] == RESERVE_10)
- *size = (cdb[7] << 8) | cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
else
*size = cmd->data_length;
@@ -1338,7 +1337,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_request_sense;
break;
case INQUIRY:
- *size = (cdb[3] << 8) + cdb[4];
+ *size = get_unaligned_be16(&cdb[3]);
/*
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
@@ -1349,7 +1348,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
break;
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
- *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ *size = get_unaligned_be32(&cdb[6]);
break;
case EXTENDED_COPY:
*size = get_unaligned_be32(&cdb[10]);
@@ -1361,19 +1360,18 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
break;
case READ_ATTRIBUTE:
case WRITE_ATTRIBUTE:
- *size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
+ *size = get_unaligned_be32(&cdb[10]);
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
- *size = (cdb[3] << 8) | cdb[4];
+ *size = get_unaligned_be16(&cdb[3]);
break;
case WRITE_BUFFER:
- *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be24(&cdb[6]);
break;
case REPORT_LUNS:
cmd->execute_cmd = spc_emulate_report_luns;
- *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ *size = get_unaligned_be32(&cdb[6]);
/*
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 13f47bf4d16b..e22847bd79b9 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -355,20 +355,10 @@ static void core_tmr_drain_state_list(
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
list_del_init(&cmd->state_list);
- pr_debug("LUN_RESET: %s cmd: %p"
- " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
- "cdb: 0x%02x\n",
- (preempt_and_abort_list) ? "Preempt" : "", cmd,
- cmd->tag, 0,
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->t_task_cdb[0]);
- pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
- " -- CMD_T_ACTIVE: %d"
- " CMD_T_STOP: %d CMD_T_SENT: %d\n",
- cmd->tag, cmd->pr_res_key,
- (cmd->transport_state & CMD_T_ACTIVE) != 0,
- (cmd->transport_state & CMD_T_STOP) != 0,
- (cmd->transport_state & CMD_T_SENT) != 0);
+ target_show_cmd("LUN_RESET: ", cmd);
+ pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
+ cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
+ cmd->pr_res_key);
/*
* If the command may be queued onto a workqueue cancel it now.
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 310d9e55c6eb..36913734c6bc 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -576,7 +576,6 @@ struct se_lun *core_tpg_alloc_lun(
return ERR_PTR(-ENOMEM);
}
lun->unpacked_lun = unpacked_lun;
- lun->lun_link_magic = SE_LUN_LINK_MAGIC;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_ref_comp);
init_completion(&lun->lun_shutdown_comp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f1b3a46bdcaf..97fed9a298bd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -252,7 +252,7 @@ int transport_alloc_session_tags(struct se_session *se_sess,
int rc;
se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
- GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
if (!se_sess->sess_cmd_map) {
se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
if (!se_sess->sess_cmd_map) {
@@ -704,23 +704,43 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
return cmd->sense_buffer;
}
+void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
+{
+ unsigned char *cmd_sense_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd_sense_buf = transport_get_sense_buffer(cmd);
+ if (!cmd_sense_buf) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+
+ cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+ memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+EXPORT_SYMBOL(transport_copy_sense_to_cmd);
+
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
struct se_device *dev = cmd->se_dev;
- int success = scsi_status == GOOD;
+ int success;
unsigned long flags;
cmd->scsi_status = scsi_status;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- if (dev && dev->transport->transport_complete) {
- dev->transport->transport_complete(cmd,
- cmd->t_data_sg,
- transport_get_sense_buffer(cmd));
+ switch (cmd->scsi_status) {
+ case SAM_STAT_CHECK_CONDITION:
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
success = 1;
+ else
+ success = 0;
+ break;
+ default:
+ success = 1;
+ break;
}
/*
@@ -730,6 +750,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ /*
+ * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
+ * release se_device->caw_sem obtained by sbc_compare_and_write()
+ * since target_complete_ok_work() or target_complete_failure_work()
+ * won't be called to invoke the normal CAW completion callbacks.
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ up(&dev->caw_sem);
+ }
complete_all(&cmd->t_transport_stop_comp);
return;
} else if (!success) {
@@ -1239,6 +1268,7 @@ void transport_init_se_cmd(
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
spin_lock_init(&cmd->t_state_lock);
+ INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
cmd->se_tfo = tfo;
@@ -1590,9 +1620,33 @@ static void target_complete_tmr_failure(struct work_struct *work)
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+ transport_lun_remove_cmd(se_cmd);
transport_cmd_check_stop_to_fabric(se_cmd);
}
+static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
+ u64 *unpacked_lun)
+{
+ struct se_cmd *se_cmd;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ continue;
+
+ if (se_cmd->tag == tag) {
+ *unpacked_lun = se_cmd->orig_fe_lun;
+ ret = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ return ret;
+}
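(Illustrative fabric call site; the argument order follows the
target_submit_tmr() prototype and is otherwise an assumption of this
sketch:)

	/*
	 * LUN 0 is a placeholder: with TARGET_SCF_LOOKUP_LUN_FROM_TAG the
	 * core resolves the real unpacked_lun by matching the tag above.
	 */
	ret = target_submit_tmr(se_cmd, se_sess, NULL, 0, fabric_tmr_ptr,
				TMR_ABORT_TASK, GFP_ATOMIC, tag,
				TARGET_SCF_LOOKUP_LUN_FROM_TAG);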
+
/**
* target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
* for TMR CDBs
@@ -1640,19 +1694,31 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
}
+ /*
+ * If this is an ABORT_TASK with no explicit fabric-provided LUN,
+ * go ahead and search active session tags for a match to figure
+ * out unpacked_lun for the original se_cmd.
+ */
+ if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
+ if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
+ goto failure;
+ }
ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
- if (ret) {
- /*
- * For callback during failure handling, push this work off
- * to process context with TMR_LUN_DOES_NOT_EXIST status.
- */
- INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
- schedule_work(&se_cmd->work);
- return 0;
- }
+ if (ret)
+ goto failure;
+
transport_generic_handle_tmr(se_cmd);
return 0;
+
+ /*
+ * For callback during failure handling, push this work off
+ * to process context with TMR_LUN_DOES_NOT_EXIST status.
+ */
+failure:
+ INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
+ schedule_work(&se_cmd->work);
+ return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
@@ -1667,15 +1733,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
if (transport_check_aborted_status(cmd, 1))
return;
- pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
- " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, sense_reason);
- pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
- (cmd->transport_state & CMD_T_ACTIVE) != 0,
- (cmd->transport_state & CMD_T_STOP) != 0,
- (cmd->transport_state & CMD_T_SENT) != 0);
+ pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
+ sense_reason);
+ target_show_cmd("-----[ ", cmd);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
@@ -2668,6 +2728,108 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_put_sess_cmd);
+static const char *data_dir_name(enum dma_data_direction d)
+{
+ switch (d) {
+ case DMA_BIDIRECTIONAL: return "BIDI";
+ case DMA_TO_DEVICE: return "WRITE";
+ case DMA_FROM_DEVICE: return "READ";
+ case DMA_NONE: return "NONE";
+ }
+
+ return "(?)";
+}
+
+static const char *cmd_state_name(enum transport_state_table t)
+{
+ switch (t) {
+ case TRANSPORT_NO_STATE: return "NO_STATE";
+ case TRANSPORT_NEW_CMD: return "NEW_CMD";
+ case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
+ case TRANSPORT_PROCESSING: return "PROCESSING";
+ case TRANSPORT_COMPLETE: return "COMPLETE";
+ case TRANSPORT_ISTATE_PROCESSING:
+ return "ISTATE_PROCESSING";
+ case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
+ case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
+ case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
+ }
+
+ return "(?)";
+}
+
+static void target_append_str(char **str, const char *txt)
+{
+ char *prev = *str;
+
+ *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
+ kstrdup(txt, GFP_ATOMIC);
+ kfree(prev);
+}
+
+/*
+ * Convert a transport state bitmask into a string. The caller is
+ * responsible for freeing the returned pointer.
+ */
+static char *target_ts_to_str(u32 ts)
+{
+ char *str = NULL;
+
+ if (ts & CMD_T_ABORTED)
+ target_append_str(&str, "aborted");
+ if (ts & CMD_T_ACTIVE)
+ target_append_str(&str, "active");
+ if (ts & CMD_T_COMPLETE)
+ target_append_str(&str, "complete");
+ if (ts & CMD_T_SENT)
+ target_append_str(&str, "sent");
+ if (ts & CMD_T_STOP)
+ target_append_str(&str, "stop");
+ if (ts & CMD_T_FABRIC_STOP)
+ target_append_str(&str, "fabric_stop");
+
+ return str;
+}
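(For example, an active command that has been sent to the backend
renders as "active,sent"; the caller owns and frees the string:)

	char *ts = target_ts_to_str(CMD_T_ACTIVE | CMD_T_SENT);

	pr_debug("transport_state: %s\n", ts);	/* "active,sent" */
	kfree(ts);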
+
+static const char *target_tmf_name(enum tcm_tmreq_table tmf)
+{
+ switch (tmf) {
+ case TMR_ABORT_TASK: return "ABORT_TASK";
+ case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
+ case TMR_CLEAR_ACA: return "CLEAR_ACA";
+ case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
+ case TMR_LUN_RESET: return "LUN_RESET";
+ case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
+ case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
+ case TMR_UNKNOWN: break;
+ }
+ return "(?)";
+}
+
+void target_show_cmd(const char *pfx, struct se_cmd *cmd)
+{
+ char *ts_str = target_ts_to_str(cmd->transport_state);
+ const u8 *cdb = cmd->t_task_cdb;
+ struct se_tmr_req *tmf = cmd->se_tmr_req;
+
+ if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+ pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
+ pfx, cdb[0], cdb[1], cmd->tag,
+ data_dir_name(cmd->data_direction),
+ cmd->se_tfo->get_cmd_state(cmd),
+ cmd_state_name(cmd->t_state), cmd->data_length,
+ kref_read(&cmd->cmd_kref), ts_str);
+ } else {
+ pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
+ pfx, target_tmf_name(tmf->function), cmd->tag,
+ tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
+ cmd_state_name(cmd->t_state),
+ kref_read(&cmd->cmd_kref), ts_str);
+ }
+ kfree(ts_str);
+}
+EXPORT_SYMBOL(target_show_cmd);
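(Usage is a one-liner with a caller-chosen prefix, as the LUN_RESET and
wait_for_tasks call sites elsewhere in this diff show; "my_fabric" is a
hypothetical example:)

	target_show_cmd("my_fabric abort: ", cmd);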
+
/* target_sess_cmd_list_set_waiting - Flag all commands in
* sess_cmd_list to complete cmd_wait_comp. Set
* sess_tearing_down so no more commands are queued.
@@ -2812,13 +2974,13 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
cmd->transport_state |= CMD_T_STOP;
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
- " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+ target_show_cmd("wait_for_tasks: Stopping ", cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
- wait_for_completion(&cmd->t_transport_stop_comp);
+ while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
+ 180 * HZ))
+ target_show_cmd("wait for tasks: ", cmd);
spin_lock_irqsave(&cmd->t_state_lock, *flags);
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
@@ -3201,6 +3363,7 @@ static void target_tmr_work(struct work_struct *work)
cmd->se_tfo->queue_tm_rsp(cmd);
check_stop:
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
@@ -3223,6 +3386,7 @@ int transport_generic_handle_tmr(
pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
cmd->se_tmr_req->ref_task_tag, cmd->tag);
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return 0;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index beb5f098f32d..80ee130f8253 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -87,6 +87,8 @@
/* Default maximum number of global data blocks (512K * PAGE_SIZE) */
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
+static u8 tcmu_kern_cmd_reply_supported;
+
static struct device *tcmu_root_device;
struct tcmu_hba {
@@ -95,6 +97,13 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+struct tcmu_nl_cmd {
+ /* wake up thread waiting for reply */
+ struct completion complete;
+ int cmd;
+ int status;
+};
+
struct tcmu_dev {
struct list_head node;
struct kref kref;
@@ -135,6 +144,11 @@ struct tcmu_dev {
struct timer_list timeout;
unsigned int cmd_time_out;
+ spinlock_t nl_cmd_lock;
+ struct tcmu_nl_cmd curr_nl_cmd;
+ /* wake up threads waiting on curr_nl_cmd */
+ wait_queue_head_t nl_cmd_wq;
+
char dev_config[TCMU_CONFIG_LEN];
};
@@ -178,16 +192,128 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
[TCMU_MCGRP_CONFIG] = { .name = "config", },
};
+static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
+ [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
+ [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
+ [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
+ [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
+ [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
+};
+
+static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
+{
+ struct se_device *dev;
+ struct tcmu_dev *udev;
+ struct tcmu_nl_cmd *nl_cmd;
+ int dev_id, rc, ret = 0;
+ bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
+
+ if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
+ !info->attrs[TCMU_ATTR_DEVICE_ID]) {
+ printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
+ return -EINVAL;
+ }
+
+ dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
+ rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
+
+ dev = target_find_device(dev_id, !is_removed);
+ if (!dev) {
+ printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ return -ENODEV;
+ }
+ udev = TCMU_DEV(dev);
+
+ spin_lock(&udev->nl_cmd_lock);
+ nl_cmd = &udev->curr_nl_cmd;
+
+ pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
+ nl_cmd->cmd, completed_cmd, rc);
+
+ if (nl_cmd->cmd != completed_cmd) {
+ printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
+ completed_cmd, nl_cmd->cmd);
+ ret = -EINVAL;
+ } else {
+ nl_cmd->status = rc;
+ }
+
+ spin_unlock(&udev->nl_cmd_lock);
+ if (!is_removed)
+ target_undepend_item(&dev->dev_group.cg_item);
+ if (!ret)
+ complete(&nl_cmd->complete);
+ return ret;
+}
+
+static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
+}
+
+static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
+}
+
+static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
+}
+
+static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
+ tcmu_kern_cmd_reply_supported =
+ nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
+ printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
+ tcmu_kern_cmd_reply_supported);
+ }
+
+ return 0;
+}
+
+static const struct genl_ops tcmu_genl_ops[] = {
+ {
+ .cmd = TCMU_CMD_SET_FEATURES,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_set_features,
+ },
+ {
+ .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_add_dev_done,
+ },
+ {
+ .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_rm_dev_done,
+ },
+ {
+ .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_reconfig_dev_done,
+ },
+};
+
/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
.module = THIS_MODULE,
.hdrsize = 0,
.name = "TCM-USER",
- .version = 1,
+ .version = 2,
.maxattr = TCMU_ATTR_MAX,
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
+ .ops = tcmu_genl_ops,
+ .n_ops = ARRAY_SIZE(tcmu_genl_ops),
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
@@ -216,7 +342,6 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
page = radix_tree_lookup(&udev->data_blocks, dbi);
if (!page) {
-
if (atomic_add_return(1, &global_db_count) >
TCMU_GLOBAL_MAX_BLOCKS) {
atomic_dec(&global_db_count);
@@ -226,14 +351,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
/* try to get new page from the mm */
page = alloc_page(GFP_KERNEL);
if (!page)
- return false;
+ goto err_alloc;
ret = radix_tree_insert(&udev->data_blocks, dbi, page);
- if (ret) {
- __free_page(page);
- return false;
- }
-
+ if (ret)
+ goto err_insert;
}
if (dbi > udev->dbi_max)
@@ -243,6 +365,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
return true;
+err_insert:
+ __free_page(page);
+err_alloc:
+ atomic_dec(&global_db_count);
+ return false;
}
static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
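
The new error labels keep the global block accounting balanced:
atomic_add_return() reserves a slot in global_db_count up front, so every
failure after that point has to hand the slot back. The reserve-then-rollback
idiom, reduced to its essentials (names as in the patch):

	if (atomic_add_return(1, &global_db_count) > TCMU_GLOBAL_MAX_BLOCKS) {
		atomic_dec(&global_db_count);	/* over the cap: undo */
		return false;
	}
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto err_alloc;			/* must also undo the reservation */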
@@ -401,7 +528,7 @@ static inline size_t get_block_offset_user(struct tcmu_dev *dev,
DATA_BLOCK_SIZE - remaining;
}
-static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
+static inline size_t iov_tail(struct iovec *iov)
{
return (size_t)iov->iov_base + iov->iov_len;
}
@@ -437,10 +564,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
to_offset = get_block_offset_user(udev, dbi,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- to = (void *)(unsigned long)to + offset;
+ to += offset;
if (*iov_cnt != 0 &&
- to_offset == iov_tail(udev, *iov)) {
+ to_offset == iov_tail(*iov)) {
(*iov)->iov_len += copy_bytes;
} else {
new_iov(iov, iov_cnt, udev);
@@ -510,7 +637,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- from = (void *)(unsigned long)from + offset;
+ from += offset;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from,
copy_bytes);
@@ -596,10 +723,7 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
}
}
- if (!tcmu_get_empty_blocks(udev, cmd))
- return false;
-
- return true;
+ return tcmu_get_empty_blocks(udev, cmd);
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -699,25 +823,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
entry = (void *) mb + CMDR_OFF + cmd_head;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
entry->hdr.cmd_id = 0; /* not used for PAD */
entry->hdr.kflags = 0;
entry->hdr.uflags = 0;
+ tcmu_flush_dcache_range(entry, sizeof(*entry));
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
WARN_ON(cmd_head != 0);
}
entry = (void *) mb + CMDR_OFF + cmd_head;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
- entry->hdr.kflags = 0;
- entry->hdr.uflags = 0;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
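
The reordering is the point of this hunk: the PAD entry is now fully populated
before tcmu_flush_dcache_range() publishes it, and the mailbox is flushed
again after cmd_head moves, so userspace polling the shared ring never
observes a half-written header. The publish-after-write idiom in minimal form:

	/* write every field of the shared entry first ... */
	entry->hdr.cmd_id = 0;
	entry->hdr.kflags = 0;
	entry->hdr.uflags = 0;
	/* ... and only then make it visible to the other side */
	tcmu_flush_dcache_range(entry, sizeof(*entry));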
@@ -736,11 +859,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
entry->req.iov_cnt = iov_cnt;
- entry->req.iov_dif_cnt = 0;
/* Handle BIDI commands */
+ iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
- iov_cnt = 0;
iov++;
ret = scatter_data_area(udev, tcmu_cmd,
se_cmd->t_bidi_data_sg,
@@ -753,8 +875,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_err("tcmu: alloc and scatter bidi data failed\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- entry->req.iov_bidi_cnt = iov_cnt;
}
+ entry->req.iov_bidi_cnt = iov_cnt;
/*
* Recalculate the command's base size and size according
@@ -830,8 +952,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
- memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
- se_cmd->scsi_sense_length);
+ transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Get Data-In buffer before clean up */
gather_data_area(udev, cmd, true);
@@ -989,6 +1110,9 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
setup_timer(&udev->timeout, tcmu_device_timedout,
(unsigned long)udev);
+ init_waitqueue_head(&udev->nl_cmd_wq);
+ spin_lock_init(&udev->nl_cmd_lock);
+
return &udev->se_dev;
}
@@ -1140,6 +1264,7 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
return -EBUSY;
udev->inode = inode;
+ kref_get(&udev->kref);
pr_debug("open\n");
@@ -1171,12 +1296,59 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
pr_debug("close\n");
- /* release ref from configure */
+ /* release ref from open */
kref_put(&udev->kref, tcmu_dev_kref_release);
return 0;
}
-static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
+static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return;
+relock:
+ spin_lock(&udev->nl_cmd_lock);
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ spin_unlock(&udev->nl_cmd_lock);
+ pr_debug("sleeping for open nl cmd\n");
+ wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
+ goto relock;
+ }
+
+ memset(nl_cmd, 0, sizeof(*nl_cmd));
+ nl_cmd->cmd = cmd;
+ init_completion(&nl_cmd->complete);
+
+ spin_unlock(&udev->nl_cmd_lock);
+}
+
+static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+ int ret;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return 0;
+
+ pr_debug("sleeping for nl reply\n");
+ wait_for_completion(&nl_cmd->complete);
+
+ spin_lock(&udev->nl_cmd_lock);
+ nl_cmd->cmd = TCMU_CMD_UNSPEC;
+ ret = nl_cmd->status;
+ nl_cmd->status = 0;
+ spin_unlock(&udev->nl_cmd_lock);
+
+ wake_up_all(&udev->nl_cmd_wq);
+
+ return ret;
+}
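
Together, tcmu_init_genl_cmd_reply() and tcmu_wait_genl_cmd_reply() form a
one-outstanding-request handshake: curr_nl_cmd is claimed under nl_cmd_lock
(later claimants sleep on nl_cmd_wq), the event is multicast, and the sender
blocks on a completion that the *_done genl handlers fire. Stripped down to
the core mechanism (struct and function names here are illustrative, not from
the patch):

	struct nl_reply {
		struct completion done;	/* fired by the _done handler */
		int status;		/* TCMU_ATTR_CMD_STATUS from the daemon */
	};

	static int send_event_and_wait(struct nl_reply *r)
	{
		init_completion(&r->done);
		/* ... genlmsg_multicast_allns() the event here ... */
		wait_for_completion(&r->done);
		return r->status;
	}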
+
+static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
+ int reconfig_attr, const void *reconfig_data)
{
struct sk_buff *skb;
void *msg_header;
@@ -1190,22 +1362,51 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
if (!msg_header)
goto free_skb;
- ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
+ ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
+ if (ret < 0)
+ goto free_skb;
+
+ ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
if (ret < 0)
goto free_skb;
- ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
+ ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
if (ret < 0)
goto free_skb;
+ if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
+ switch (reconfig_attr) {
+ case TCMU_ATTR_DEV_CFG:
+ ret = nla_put_string(skb, reconfig_attr, reconfig_data);
+ break;
+ case TCMU_ATTR_DEV_SIZE:
+ ret = nla_put_u64_64bit(skb, reconfig_attr,
+ *((u64 *)reconfig_data),
+ TCMU_ATTR_PAD);
+ break;
+ case TCMU_ATTR_WRITECACHE:
+ ret = nla_put_u8(skb, reconfig_attr,
+ *((u8 *)reconfig_data));
+ break;
+ default:
+ BUG();
+ }
+
+ if (ret < 0)
+ goto free_skb;
+ }
+
genlmsg_end(skb, msg_header);
+ tcmu_init_genl_cmd_reply(udev, cmd);
+
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
-
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
+ if (!ret)
+ ret = tcmu_wait_genl_cmd_reply(udev);
return ret;
free_skb:
@@ -1213,19 +1414,14 @@ free_skb:
return ret;
}
-static int tcmu_configure_device(struct se_device *dev)
+static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
- struct tcmu_dev *udev = TCMU_DEV(dev);
struct tcmu_hba *hba = udev->hba->hba_ptr;
struct uio_info *info;
- struct tcmu_mailbox *mb;
- size_t size;
- size_t used;
- int ret = 0;
+ size_t size, used;
char *str;
info = &udev->uio_info;
-
size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
udev->dev_config);
size += 1; /* for \0 */
@@ -1234,12 +1430,27 @@ static int tcmu_configure_device(struct se_device *dev)
return -ENOMEM;
used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
-
if (udev->dev_config[0])
snprintf(str + used, size - used, "/%s", udev->dev_config);
info->name = str;
+ return 0;
+}
+
+static int tcmu_configure_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+ struct uio_info *info;
+ struct tcmu_mailbox *mb;
+ int ret = 0;
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+
+ info = &udev->uio_info;
+
udev->mb_addr = vzalloc(CMDR_SIZE);
if (!udev->mb_addr) {
ret = -ENOMEM;
@@ -1290,6 +1501,8 @@ static int tcmu_configure_device(struct se_device *dev)
/* Other attributes can be configured in userspace */
if (!dev->dev_attrib.hw_max_sectors)
dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.emulate_write_cache)
+ dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
/*
@@ -1298,8 +1511,7 @@ static int tcmu_configure_device(struct se_device *dev)
*/
kref_get(&udev->kref);
- ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
- udev->uio_info.uio_dev->minor);
+ ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
if (ret)
goto err_netlink;
@@ -1355,6 +1567,14 @@ static void tcmu_blocks_release(struct tcmu_dev *udev)
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ /* release ref from init */
+ kref_put(&udev->kref, tcmu_dev_kref_release);
+}
+
+static void tcmu_destroy_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
struct tcmu_cmd *cmd;
bool all_expired = true;
int i;
@@ -1379,14 +1599,11 @@ static void tcmu_free_device(struct se_device *dev)
tcmu_blocks_release(udev);
- if (tcmu_dev_configured(udev)) {
- tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
- udev->uio_info.uio_dev->minor);
+ tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
- uio_unregister_device(&udev->uio_info);
- }
+ uio_unregister_device(&udev->uio_info);
- /* release ref from init */
+ /* release ref from configure */
kref_put(&udev->kref, tcmu_dev_kref_release);
}
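
With tcmu_free_device() split into free/destroy, the udev kref ownership
lines up with the "release ref from ..." comments (the init reference is
assumed to come from a kref_init() in tcmu_alloc_device(), which this diff
does not show):

	/*
	 * kref_init()  at alloc      -> kref_put() in tcmu_free_device()
	 * kref_get()   at configure  -> kref_put() in tcmu_destroy_device()
	 * kref_get()   at open       -> kref_put() in tcmu_release()
	 */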
@@ -1546,6 +1763,129 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
+static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
+}
+
+static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ int ret, len;
+
+ len = strlen(page);
+ if (!len || len > TCMU_CONFIG_LEN - 1)
+ return -EINVAL;
+
+ /* Check if device has been configured before */
+ if (tcmu_dev_configured(udev)) {
+ ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_ATTR_DEV_CFG, page);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+ return count;
+ }
+ strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, dev_config);
+
+static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+}
+
+static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Check if device has been configured before */
+ if (tcmu_dev_configured(udev)) {
+ ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_ATTR_DEV_SIZE, &val);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ }
+ udev->dev_size = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, dev_size);
+
+static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+
+ return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
+}
+
+static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Check if device has been configured before */
+ if (tcmu_dev_configured(udev)) {
+ ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_ATTR_WRITECACHE, &val);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ }
+
+ da->emulate_write_cache = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, emulate_write_cache);
+
+static struct configfs_attribute *tcmu_attrib_attrs[] = {
+ &tcmu_attr_cmd_time_out,
+ &tcmu_attr_dev_config,
+ &tcmu_attr_dev_size,
+ &tcmu_attr_emulate_write_cache,
+ NULL,
+};
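
All three new attributes share one store pattern: if the device is already
configured, emit TCMU_CMD_RECONFIG_DEVICE with the matching attribute, block
until the daemon replies, and only then commit the new value. From userspace
this is an ordinary configfs write (the path below is illustrative; it
depends on the HBA and device names):

	/*
	 *   echo 1 > /sys/kernel/config/target/core/user_0/dev0/attrib/emulate_write_cache
	 *
	 * lands in tcmu_emulate_write_cache_store() and, for a configured
	 * device, sleeps until TCMU_CMD_RECONFIG_DEVICE_DONE arrives.
	 */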
+
static struct configfs_attribute **tcmu_attrs;
static struct target_backend_ops tcmu_ops = {
@@ -1556,6 +1896,7 @@ static struct target_backend_ops tcmu_ops = {
.detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device,
.configure_device = tcmu_configure_device,
+ .destroy_device = tcmu_destroy_device,
.free_device = tcmu_free_device,
.parse_cdb = tcmu_parse_cdb,
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
@@ -1573,7 +1914,7 @@ static int unmap_thread_fn(void *data)
struct page *page;
int i;
- while (1) {
+ while (!kthread_should_stop()) {
DEFINE_WAIT(__wait);
prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
@@ -1645,7 +1986,7 @@ static int unmap_thread_fn(void *data)
static int __init tcmu_module_init(void)
{
- int ret, i, len = 0;
+ int ret, i, k, len = 0;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1670,7 +2011,10 @@ static int __init tcmu_module_init(void)
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
len += sizeof(struct configfs_attribute *);
}
- len += sizeof(struct configfs_attribute *) * 2;
+ for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
+ len += sizeof(struct configfs_attribute *);
+ }
+ len += sizeof(struct configfs_attribute *);
tcmu_attrs = kzalloc(len, GFP_KERNEL);
if (!tcmu_attrs) {
@@ -1681,7 +2025,10 @@ static int __init tcmu_module_init(void)
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
tcmu_attrs[i] = passthrough_attrib_attrs[i];
}
- tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+ for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
+ tcmu_attrs[i] = tcmu_attrib_attrs[k];
+ i++;
+ }
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
ret = transport_backend_register(&tcmu_ops);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index cac5a20a4de0..9ee89e00cd77 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,6 +40,8 @@
static struct workqueue_struct *xcopy_wq = NULL;
+static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
+
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
int off = 0;
@@ -53,48 +55,60 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+struct xcopy_dev_search_info {
+ const unsigned char *dev_wwn;
+ struct se_device *found_dev;
+};
+
+static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
+ void *data)
{
- struct se_device *se_dev;
+ struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- mutex_lock(&g_device_mutex);
- list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+ if (!se_dev->dev_attrib.emulate_3pc)
+ return 0;
- if (!se_dev->dev_attrib.emulate_3pc)
- continue;
+ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
- target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+ rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+ if (rc != 0)
+ return 0;
- rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- continue;
+ info->found_dev = se_dev;
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- *found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
+ rc = target_depend_item(&se_dev->dev_group.cg_item);
+ if (rc != 0) {
+ pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
+ rc, se_dev);
+ return rc;
+ }
- rc = target_depend_item(&se_dev->dev_group.cg_item);
- if (rc != 0) {
- pr_err("configfs_depend_item attempt failed:"
- " %d for se_dev: %p\n", rc, se_dev);
- mutex_unlock(&g_device_mutex);
- return rc;
- }
+ pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
+ se_dev, &se_dev->dev_group);
+ return 1;
+}
- pr_debug("Called configfs_depend_item for se_dev: %p"
- " se_dev->se_dev_group: %p\n", se_dev,
- &se_dev->dev_group);
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+ struct se_device **found_dev)
+{
+ struct xcopy_dev_search_info info;
+ int ret;
+
+ memset(&info, 0, sizeof(info));
+ info.dev_wwn = dev_wwn;
- mutex_unlock(&g_device_mutex);
+ ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
+ if (ret == 1) {
+ *found_dev = info.found_dev;
return 0;
+ } else {
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
- mutex_unlock(&g_device_mutex);
-
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
}
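
The open-coded walk of g_device_list is gone; target_for_each_device() keeps
the device-list locking inside the core and hands each device to a callback.
The contract this patch relies on: return 0 to keep iterating, a positive
value to stop with a match, a negative errno to abort. A minimal callback
under that contract (matches_wwn() is a hypothetical helper):

	static int find_dev_cb(struct se_device *se_dev, void *data)
	{
		struct xcopy_dev_search_info *info = data;

		if (!matches_wwn(se_dev, info->dev_wwn))
			return 0;	/* keep iterating */
		info->found_dev = se_dev;
		return 1;		/* found: stop the walk */
	}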
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -311,9 +325,7 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
(unsigned long long)xop->dst_lba);
if (dc != 0) {
- xop->dbl = (desc[29] & 0xff) << 16;
- xop->dbl |= (desc[30] & 0xff) << 8;
- xop->dbl |= desc[31] & 0xff;
+ xop->dbl = get_unaligned_be24(&desc[29]);
pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
}
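
get_unaligned_be24() replaces the hand-rolled three-byte assembly; the two
forms compute the same value:

	/* what get_unaligned_be24(&desc[29]) evaluates to, spelled out: */
	static inline u32 be24(const u8 *p)
	{
		return ((u32)p[0] << 16) | ((u32)p[1] << 8) | p[2];
	}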
@@ -781,13 +793,24 @@ static int target_xcopy_write_destination(
static void target_xcopy_do_work(struct work_struct *work)
{
struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
- struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
struct se_cmd *ec_cmd = xop->xop_se_cmd;
- sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
+ struct se_device *src_dev, *dst_dev;
+ sector_t src_lba, dst_lba, end_lba;
unsigned int max_sectors;
- int rc;
- unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
+ int rc = 0;
+ unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
+
+ if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
+ goto err_free;
+ if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
+ goto err_free;
+
+ src_dev = xop->src_dev;
+ dst_dev = xop->dst_dev;
+ src_lba = xop->src_lba;
+ dst_lba = xop->dst_lba;
+ nolb = xop->nolb;
end_lba = src_lba + nolb;
/*
* Break up XCOPY I/O into hw_max_sectors sized I/O based on the
@@ -855,6 +878,8 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
+
+err_free:
kfree(xop);
/*
* Don't override an error scsi status if it has already been set
@@ -867,48 +892,22 @@ out:
target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
}
-sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+/*
+ * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
+ * fails.
+ */
+static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
{
- struct se_device *dev = se_cmd->se_dev;
- struct xcopy_op *xop = NULL;
+ struct se_cmd *se_cmd = xop->xop_se_cmd;
unsigned char *p = NULL, *seg_desc;
- unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+ unsigned int list_id, list_id_usage, sdll, inline_dl;
sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
int rc;
unsigned short tdll;
- if (!dev->dev_attrib.emulate_3pc) {
- pr_err("EXTENDED_COPY operation explicitly disabled\n");
- return TCM_UNSUPPORTED_SCSI_OPCODE;
- }
-
- sa = se_cmd->t_task_cdb[1] & 0x1f;
- if (sa != 0x00) {
- pr_err("EXTENDED_COPY(LID4) not supported\n");
- return TCM_UNSUPPORTED_SCSI_OPCODE;
- }
-
- if (se_cmd->data_length == 0) {
- target_complete_cmd(se_cmd, SAM_STAT_GOOD);
- return TCM_NO_SENSE;
- }
- if (se_cmd->data_length < XCOPY_HDR_LEN) {
- pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
- se_cmd->data_length, XCOPY_HDR_LEN);
- return TCM_PARAMETER_LIST_LENGTH_ERROR;
- }
-
- xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
- if (!xop) {
- pr_err("Unable to allocate xcopy_op\n");
- return TCM_OUT_OF_RESOURCES;
- }
- xop->xop_se_cmd = se_cmd;
-
p = transport_kmap_data_sg(se_cmd);
if (!p) {
pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
- kfree(xop);
return TCM_OUT_OF_RESOURCES;
}
@@ -977,18 +976,57 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
transport_kunmap_data_sg(se_cmd);
-
- INIT_WORK(&xop->xop_work, target_xcopy_do_work);
- queue_work(xcopy_wq, &xop->xop_work);
return TCM_NO_SENSE;
out:
if (p)
transport_kunmap_data_sg(se_cmd);
- kfree(xop);
return ret;
}
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+ struct se_device *dev = se_cmd->se_dev;
+ struct xcopy_op *xop;
+ unsigned int sa;
+
+ if (!dev->dev_attrib.emulate_3pc) {
+ pr_err("EXTENDED_COPY operation explicitly disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ sa = se_cmd->t_task_cdb[1] & 0x1f;
+ if (sa != 0x00) {
+ pr_err("EXTENDED_COPY(LID4) not supported\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (se_cmd->data_length == 0) {
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+ return TCM_NO_SENSE;
+ }
+ if (se_cmd->data_length < XCOPY_HDR_LEN) {
+ pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+ se_cmd->data_length, XCOPY_HDR_LEN);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
+ xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+ if (!xop)
+ goto err;
+ xop->xop_se_cmd = se_cmd;
+ INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+ if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
+ goto free;
+ return TCM_NO_SENSE;
+
+free:
+ kfree(xop);
+
+err:
+ return TCM_OUT_OF_RESOURCES;
+}
+
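
target_do_xcopy() is now submission-side only: it performs the cheap CDB
checks, allocates the xcopy_op and queues the work, while the parameter-list
parsing moves into the worker via target_parse_xcopy_cmd(). Schematically:

	/* submission context: validate, allocate, queue; never parse here */
	xop = kzalloc(sizeof(*xop), GFP_KERNEL);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);

	/* worker context: parse first, bail out on bad parameter data */
	if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
		goto err_free;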
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
unsigned char *p;
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 0ecf80890c84..e6863c841662 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -245,7 +245,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
*/
err = tz->ops->get_trip_temp(tz, 0, &trip_temp);
if (err < 0) {
- err = PTR_ERR(tz);
dev_err(&pdev->dev,
"Not able to read trip_temp: %d\n",
err);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 69d0f430b2d1..908a8014cf76 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -49,40 +49,45 @@
*/
/**
- * struct power_table - frequency to power conversion
+ * struct freq_table - frequency table along with power entries
* @frequency: frequency in KHz
* @power: power in mW
*
* This structure is built when the cooling device registers and helps
- * in translating frequency to power and viceversa.
+ * in translating frequency to power and vice versa.
*/
-struct power_table {
+struct freq_table {
u32 frequency;
u32 power;
};
/**
+ * struct time_in_idle - Idle time stats
+ * @time: previous reading of the absolute time that this cpu was idle
+ * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
+ */
+struct time_in_idle {
+ u64 time;
+ u64 timestamp;
+};
+
+/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
* registered.
- * @cool_dev: thermal_cooling_device pointer to keep track of the
- * registered cooling device.
+ * @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
* @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
- * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @freq_table: Freq table in descending order of frequencies
+ * @cdev: thermal_cooling_device pointer to keep track of the
+ * registered cooling device.
+ * @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
- * @last_load: load measured by the latest call to cpufreq_get_requested_power()
- * @time_in_idle: previous reading of the absolute time that this cpu was idle
- * @time_in_idle_timestamp: wall time of the last invocation of
- * get_cpu_idle_time_us()
- * @dyn_power_table: array of struct power_table for frequency to power
- * conversion, sorted in ascending order.
- * @dyn_power_table_entries: number of entries in the @dyn_power_table array
- * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
+ * @idle_time: idle time stats
* @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
@@ -90,81 +95,45 @@ struct power_table {
*/
struct cpufreq_cooling_device {
int id;
- struct thermal_cooling_device *cool_dev;
+ u32 last_load;
unsigned int cpufreq_state;
unsigned int clipped_freq;
unsigned int max_level;
- unsigned int *freq_table; /* In descending order */
- struct cpumask allowed_cpus;
+ struct freq_table *freq_table; /* In descending order */
+ struct thermal_cooling_device *cdev;
+ struct cpufreq_policy *policy;
struct list_head node;
- u32 last_load;
- u64 *time_in_idle;
- u64 *time_in_idle_timestamp;
- struct power_table *dyn_power_table;
- int dyn_power_table_entries;
- struct device *cpu_dev;
+ struct time_in_idle *idle_time;
get_static_t plat_get_static_power;
};
-static DEFINE_IDA(cpufreq_ida);
+static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
-static LIST_HEAD(cpufreq_dev_list);
+static LIST_HEAD(cpufreq_cdev_list);
/* Below code defines functions to be used for cpufreq as cooling device */
/**
* get_level: Find the level for a particular frequency
- * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @cpufreq_cdev: cpufreq_cdev for which the property is required
* @freq: Frequency
*
- * Return: level on success, THERMAL_CSTATE_INVALID on error.
+ * Return: level corresponding to the frequency.
*/
-static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned int freq)
{
+ struct freq_table *freq_table = cpufreq_cdev->freq_table;
unsigned long level;
- for (level = 0; level <= cpufreq_dev->max_level; level++) {
- if (freq == cpufreq_dev->freq_table[level])
- return level;
-
- if (freq > cpufreq_dev->freq_table[level])
+ for (level = 1; level <= cpufreq_cdev->max_level; level++)
+ if (freq > freq_table[level].frequency)
break;
- }
- return THERMAL_CSTATE_INVALID;
+ return level - 1;
}
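
Since freq_table is sorted in descending order and state 0 is the highest
frequency, the simplified get_level() always lands on a valid level instead
of ever returning THERMAL_CSTATE_INVALID. A worked example with a three-entry
table:

	/*
	 * freq_table[].frequency = { 1800000, 1200000, 600000 }  (kHz)
	 *
	 *   get_level(cdev, 1800000): 1800000 > 1200000 at level 1 -> 0
	 *   get_level(cdev, 1200000): 1200000 > 600000  at level 2 -> 1
	 *   get_level(cdev,  900000): same break point             -> 1
	 *   get_level(cdev,  300000): loop runs past max_level     -> 2
	 */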
/**
- * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
- * @cpu: cpu for which the level is required
- * @freq: the frequency of interest
- *
- * This function will match the cooling level corresponding to the
- * requested @freq and return it.
- *
- * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
- * otherwise.
- */
-unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
-{
- struct cpufreq_cooling_device *cpufreq_dev;
-
- mutex_lock(&cooling_list_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_list_lock);
- return get_level(cpufreq_dev, freq);
- }
- }
- mutex_unlock(&cooling_list_lock);
-
- pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
- return THERMAL_CSTATE_INVALID;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
-/**
* cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
* @nb: struct notifier_block * with callback info.
* @event: value showing cpufreq event for which this function invoked.
@@ -181,14 +150,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
{
struct cpufreq_policy *policy = data;
unsigned long clipped_freq;
- struct cpufreq_cooling_device *cpufreq_dev;
+ struct cpufreq_cooling_device *cpufreq_cdev;
if (event != CPUFREQ_ADJUST)
return NOTIFY_DONE;
mutex_lock(&cooling_list_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
+ /*
+ * The notifier is handed a new copy of the policy, so it can't
+ * be compared against cpufreq_cdev->policy by pointer; match on the CPU instead.
+ */
+ if (policy->cpu != cpufreq_cdev->policy->cpu)
continue;
/*
@@ -202,7 +175,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
* But, if clipped_freq is greater than policy->max, we don't
* need to do anything.
*/
- clipped_freq = cpufreq_dev->clipped_freq;
+ clipped_freq = cpufreq_cdev->clipped_freq;
if (policy->max > clipped_freq)
cpufreq_verify_within_limits(policy, 0, clipped_freq);
@@ -214,63 +187,63 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
}
/**
- * build_dyn_power_table() - create a dynamic power to frequency table
- * @cpufreq_device: the cpufreq cooling device in which to store the table
+ * update_freq_table() - Update the freq table with power numbers
+ * @cpufreq_cdev: the cpufreq cooling device in which to update the table
* @capacitance: dynamic power coefficient for these cpus
*
- * Build a dynamic power to frequency table for this cpu and store it
- * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
- * cpu_freq_to_power() to convert between power and frequency
- * efficiently. Power is stored in mW, frequency in KHz. The
- * resulting table is in ascending order.
+ * Update the freq table with power numbers. This table will be used in
+ * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
+ * frequency efficiently. Power is stored in mW, frequency in KHz. The
+ * resulting table is in descending order.
*
* Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
- * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
- * added/enabled while the function was executing.
+ * -ENODEV if there is no cpu device, or another negative errno if the OPP lookup fails.
*/
-static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
- u32 capacitance)
+static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
+ u32 capacitance)
{
- struct power_table *power_table;
+ struct freq_table *freq_table = cpufreq_cdev->freq_table;
struct dev_pm_opp *opp;
struct device *dev = NULL;
- int num_opps = 0, cpu, i, ret = 0;
- unsigned long freq;
-
- for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
- dev = get_cpu_device(cpu);
- if (!dev) {
- dev_warn(&cpufreq_device->cool_dev->device,
- "No cpu device for cpu %d\n", cpu);
- continue;
- }
+ int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
- num_opps = dev_pm_opp_get_opp_count(dev);
- if (num_opps > 0)
- break;
- else if (num_opps < 0)
- return num_opps;
+ dev = get_cpu_device(cpu);
+ if (unlikely(!dev)) {
+ dev_warn(&cpufreq_cdev->cdev->device,
+ "No cpu device for cpu %d\n", cpu);
+ return -ENODEV;
}
- if (num_opps == 0)
- return -EINVAL;
+ num_opps = dev_pm_opp_get_opp_count(dev);
+ if (num_opps < 0)
+ return num_opps;
- power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
- if (!power_table)
- return -ENOMEM;
+ /*
+ * The cpufreq table is also built from the OPP table and so the count
+ * should match.
+ */
+ if (num_opps != cpufreq_cdev->max_level + 1) {
+ dev_warn(dev, "Number of OPPs not matching with max_levels\n");
+ return -EINVAL;
+ }
- for (freq = 0, i = 0;
- opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
- freq++, i++) {
- u32 freq_mhz, voltage_mv;
+ for (i = 0; i <= cpufreq_cdev->max_level; i++) {
+ unsigned long freq = freq_table[i].frequency * 1000;
+ u32 freq_mhz = freq_table[i].frequency / 1000;
u64 power;
+ u32 voltage_mv;
- if (i >= num_opps) {
- ret = -EAGAIN;
- goto free_power_table;
+ /*
+ * Find ceil frequency as 'freq' may be slightly lower than OPP
+ * freq due to truncation while converting to kHz.
+ */
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to get opp for %lu frequency\n",
+ freq);
+ return -EINVAL;
}
- freq_mhz = freq / 1000000;
voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
dev_pm_opp_put(opp);
@@ -281,89 +254,73 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
do_div(power, 1000000000);
- /* frequency is stored in power_table in KHz */
- power_table[i].frequency = freq / 1000;
-
/* power is stored in mW */
- power_table[i].power = power;
+ freq_table[i].power = power;
}
- if (i != num_opps) {
- ret = PTR_ERR(opp);
- goto free_power_table;
- }
-
- cpufreq_device->cpu_dev = dev;
- cpufreq_device->dyn_power_table = power_table;
- cpufreq_device->dyn_power_table_entries = i;
-
return 0;
-
-free_power_table:
- kfree(power_table);
-
- return ret;
}
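
update_freq_table() fills in the power column with the usual CMOS dynamic
power model, P_dyn = C * f * V^2, where do_div() folds the mixed units
(MHz * mV * mV) back into mW. A quick sanity check with round numbers:

	/*
	 * capacitance = 100, freq_mhz = 1000, voltage_mv = 1000:
	 *   power = 100 * 1000 * 1000 * 1000 = 10^11
	 *   do_div(power, 1000000000)        -> 100 mW
	 */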
-static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
+static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
u32 freq)
{
int i;
- struct power_table *pt = cpufreq_device->dyn_power_table;
+ struct freq_table *freq_table = cpufreq_cdev->freq_table;
- for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
- if (freq < pt[i].frequency)
+ for (i = 1; i <= cpufreq_cdev->max_level; i++)
+ if (freq > freq_table[i].frequency)
break;
- return pt[i - 1].power;
+ return freq_table[i - 1].power;
}
-static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
+static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
u32 power)
{
int i;
- struct power_table *pt = cpufreq_device->dyn_power_table;
+ struct freq_table *freq_table = cpufreq_cdev->freq_table;
- for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
- if (power < pt[i].power)
+ for (i = 1; i <= cpufreq_cdev->max_level; i++)
+ if (power > freq_table[i].power)
break;
- return pt[i - 1].frequency;
+ return freq_table[i - 1].frequency;
}
/**
* get_load() - get load for a cpu since last updated
- * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
+ * @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu
* @cpu: cpu number
- * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
+ * @cpu_idx: index of the cpu in the idle_time array
*
* Return: The average load of cpu @cpu in percentage since this
* function was last called.
*/
-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
int cpu_idx)
{
u32 load;
u64 now, now_idle, delta_time, delta_idle;
+ struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];
now_idle = get_cpu_idle_time(cpu, &now, 0);
- delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
- delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
+ delta_idle = now_idle - idle_time->time;
+ delta_time = now - idle_time->timestamp;
if (delta_time <= delta_idle)
load = 0;
else
load = div64_u64(100 * (delta_time - delta_idle), delta_time);
- cpufreq_device->time_in_idle[cpu_idx] = now_idle;
- cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
+ idle_time->time = now_idle;
+ idle_time->timestamp = now;
return load;
}
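
get_load() turns the idle-time deltas kept in struct time_in_idle into a
0-100 load figure. With illustrative numbers:

	/*
	 * delta_time = 10000 us of wall time, delta_idle = 2500 us idle:
	 *   load = 100 * (10000 - 2500) / 10000 = 75
	 * delta_time <= delta_idle (e.g. clock skew) clamps load to 0.
	 */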
/**
* get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev
+ * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
* @tz: thermal zone device in which we're operating
* @freq: frequency in KHz
* @power: pointer in which to store the calculated static power
@@ -376,26 +333,28 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
*
* Return: 0 on success, -E* on failure.
*/
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
+static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
struct thermal_zone_device *tz, unsigned long freq,
u32 *power)
{
struct dev_pm_opp *opp;
unsigned long voltage;
- struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
+ struct cpufreq_policy *policy = cpufreq_cdev->policy;
+ struct cpumask *cpumask = policy->related_cpus;
unsigned long freq_hz = freq * 1000;
+ struct device *dev;
- if (!cpufreq_device->plat_get_static_power ||
- !cpufreq_device->cpu_dev) {
+ if (!cpufreq_cdev->plat_get_static_power) {
*power = 0;
return 0;
}
- opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
- true);
+ dev = get_cpu_device(policy->cpu);
+ WARN_ON(!dev);
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
if (IS_ERR(opp)) {
- dev_warn_ratelimited(cpufreq_device->cpu_dev,
- "Failed to find OPP for frequency %lu: %ld\n",
+ dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
freq_hz, PTR_ERR(opp));
return -EINVAL;
}
@@ -404,31 +363,30 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
dev_pm_opp_put(opp);
if (voltage == 0) {
- dev_err_ratelimited(cpufreq_device->cpu_dev,
- "Failed to get voltage for frequency %lu\n",
+ dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
freq_hz);
return -EINVAL;
}
- return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
- voltage, power);
+ return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
+ voltage, power);
}
/**
* get_dynamic_power() - calculate the dynamic power
- * @cpufreq_device: &cpufreq_cooling_device for this cdev
+ * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
*
* Return: the dynamic power consumed by the cpus described by
- * @cpufreq_device.
+ * @cpufreq_cdev.
*/
-static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
+static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned long freq)
{
u32 raw_cpu_power;
- raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
- return (raw_cpu_power * cpufreq_device->last_load) / 100;
+ raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
+ return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}
/* cpufreq cooling device callback functions are defined below */
@@ -446,9 +404,9 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
- *state = cpufreq_device->max_level;
+ *state = cpufreq_cdev->max_level;
return 0;
}
@@ -465,9 +423,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
- *state = cpufreq_device->cpufreq_state;
+ *state = cpufreq_cdev->cpufreq_state;
return 0;
}
@@ -485,23 +443,22 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
unsigned int clip_freq;
/* Request state should be less than max_level */
- if (WARN_ON(state > cpufreq_device->max_level))
+ if (WARN_ON(state > cpufreq_cdev->max_level))
return -EINVAL;
/* Check if the old cooling action is same as new cooling action */
- if (cpufreq_device->cpufreq_state == state)
+ if (cpufreq_cdev->cpufreq_state == state)
return 0;
- clip_freq = cpufreq_device->freq_table[state];
- cpufreq_device->cpufreq_state = state;
- cpufreq_device->clipped_freq = clip_freq;
+ clip_freq = cpufreq_cdev->freq_table[state].frequency;
+ cpufreq_cdev->cpufreq_state = state;
+ cpufreq_cdev->clipped_freq = clip_freq;
- cpufreq_update_policy(cpu);
+ cpufreq_update_policy(cpufreq_cdev->policy->cpu);
return 0;
}
@@ -536,33 +493,23 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
unsigned long freq;
int i = 0, cpu, ret;
u32 static_power, dynamic_power, total_load = 0;
- struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+ struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
- cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
-
- /*
- * All the CPUs are offline, thus the requested power by
- * the cdev is 0
- */
- if (cpu >= nr_cpu_ids) {
- *power = 0;
- return 0;
- }
-
- freq = cpufreq_quick_get(cpu);
+ freq = cpufreq_quick_get(policy->cpu);
if (trace_thermal_power_cpu_get_power_enabled()) {
- u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);
+ u32 ncpus = cpumask_weight(policy->related_cpus);
load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
}
- for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
+ for_each_cpu(cpu, policy->related_cpus) {
u32 load;
if (cpu_online(cpu))
- load = get_load(cpufreq_device, cpu, i);
+ load = get_load(cpufreq_cdev, cpu, i);
else
load = 0;
@@ -573,19 +520,19 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
i++;
}
- cpufreq_device->last_load = total_load;
+ cpufreq_cdev->last_load = total_load;
- dynamic_power = get_dynamic_power(cpufreq_device, freq);
- ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+ dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
+ ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
if (ret) {
kfree(load_cpu);
return ret;
}
if (load_cpu) {
- trace_thermal_power_cpu_get_power(
- &cpufreq_device->allowed_cpus,
- freq, load_cpu, i, dynamic_power, static_power);
+ trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
+ load_cpu, i, dynamic_power,
+ static_power);
kfree(load_cpu);
}
@@ -614,38 +561,23 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
- cpumask_var_t cpumask;
u32 static_power, dynamic_power;
int ret;
- struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-
- if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_and(cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
- num_cpus = cpumask_weight(cpumask);
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
- /* None of our cpus are online, so no power */
- if (num_cpus == 0) {
- *power = 0;
- ret = 0;
- goto out;
- }
+ /* Request state should be less than max_level */
+ if (WARN_ON(state > cpufreq_cdev->max_level))
+ return -EINVAL;
- freq = cpufreq_device->freq_table[state];
- if (!freq) {
- ret = -EINVAL;
- goto out;
- }
+ num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
- dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
- ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+ freq = cpufreq_cdev->freq_table[state].frequency;
+ dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
+ ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
if (ret)
- goto out;
+ return ret;
*power = static_power + dynamic_power;
-out:
- free_cpumask_var(cpumask);
return ret;
}
@@ -673,39 +605,27 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz, u32 power,
unsigned long *state)
{
- unsigned int cpu, cur_freq, target_freq;
+ unsigned int cur_freq, target_freq;
int ret;
s32 dyn_power;
u32 last_load, normalised_power, static_power;
- struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+ struct cpufreq_policy *policy = cpufreq_cdev->policy;
- cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
-
- /* None of our cpus are online */
- if (cpu >= nr_cpu_ids)
- return -ENODEV;
-
- cur_freq = cpufreq_quick_get(cpu);
- ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
+ cur_freq = cpufreq_quick_get(policy->cpu);
+ ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
if (ret)
return ret;
dyn_power = power - static_power;
dyn_power = dyn_power > 0 ? dyn_power : 0;
- last_load = cpufreq_device->last_load ?: 1;
+ last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (dyn_power * 100) / last_load;
- target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);
+ target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
- *state = cpufreq_cooling_get_level(cpu, target_freq);
- if (*state == THERMAL_CSTATE_INVALID) {
- dev_err_ratelimited(&cdev->device,
- "Failed to convert %dKHz for cpu %d into a cdev state\n",
- target_freq, cpu);
- return -EINVAL;
- }
-
- trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
- target_freq, *state, power);
+ *state = get_level(cpufreq_cdev, target_freq);
+ trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
+ power);
return 0;
}
@@ -748,7 +668,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * @policy: cpufreq policy
* Normally this should be the same as policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
@@ -764,102 +684,68 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus, u32 capacitance,
+ struct cpufreq_policy *policy, u32 capacitance,
get_static_t plat_static_func)
{
- struct cpufreq_policy *policy;
- struct thermal_cooling_device *cool_dev;
- struct cpufreq_cooling_device *cpufreq_dev;
+ struct thermal_cooling_device *cdev;
+ struct cpufreq_cooling_device *cpufreq_cdev;
char dev_name[THERMAL_NAME_LENGTH];
- struct cpufreq_frequency_table *pos, *table;
- cpumask_var_t temp_mask;
unsigned int freq, i, num_cpus;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
bool first;
- if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
- return ERR_PTR(-ENOMEM);
-
- cpumask_and(temp_mask, clip_cpus, cpu_online_mask);
- policy = cpufreq_cpu_get(cpumask_first(temp_mask));
- if (!policy) {
- pr_debug("%s: CPUFreq policy not found\n", __func__);
- cool_dev = ERR_PTR(-EPROBE_DEFER);
- goto free_cpumask;
+ if (IS_ERR_OR_NULL(policy)) {
+ pr_err("%s: cpufreq policy isn't valid: %p", __func__, policy);
+ return ERR_PTR(-EINVAL);
}
- table = policy->freq_table;
- if (!table) {
- pr_debug("%s: CPUFreq table not found\n", __func__);
- cool_dev = ERR_PTR(-ENODEV);
- goto put_policy;
+ i = cpufreq_table_count_valid_entries(policy);
+ if (!i) {
+ pr_debug("%s: CPUFreq table not found or has no valid entries\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
}
- cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
- if (!cpufreq_dev) {
- cool_dev = ERR_PTR(-ENOMEM);
- goto put_policy;
- }
+ cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
+ if (!cpufreq_cdev)
+ return ERR_PTR(-ENOMEM);
- num_cpus = cpumask_weight(clip_cpus);
- cpufreq_dev->time_in_idle = kcalloc(num_cpus,
- sizeof(*cpufreq_dev->time_in_idle),
- GFP_KERNEL);
- if (!cpufreq_dev->time_in_idle) {
- cool_dev = ERR_PTR(-ENOMEM);
+ cpufreq_cdev->policy = policy;
+ num_cpus = cpumask_weight(policy->related_cpus);
+ cpufreq_cdev->idle_time = kcalloc(num_cpus,
+ sizeof(*cpufreq_cdev->idle_time),
+ GFP_KERNEL);
+ if (!cpufreq_cdev->idle_time) {
+ cdev = ERR_PTR(-ENOMEM);
goto free_cdev;
}
- cpufreq_dev->time_in_idle_timestamp =
- kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
- GFP_KERNEL);
- if (!cpufreq_dev->time_in_idle_timestamp) {
- cool_dev = ERR_PTR(-ENOMEM);
- goto free_time_in_idle;
- }
-
- /* Find max levels */
- cpufreq_for_each_valid_entry(pos, table)
- cpufreq_dev->max_level++;
-
- cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
- cpufreq_dev->max_level, GFP_KERNEL);
- if (!cpufreq_dev->freq_table) {
- cool_dev = ERR_PTR(-ENOMEM);
- goto free_time_in_idle_timestamp;
- }
-
/* max_level is an index, not a counter */
- cpufreq_dev->max_level--;
-
- cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
-
- if (capacitance) {
- cpufreq_dev->plat_get_static_power = plat_static_func;
-
- ret = build_dyn_power_table(cpufreq_dev, capacitance);
- if (ret) {
- cool_dev = ERR_PTR(ret);
- goto free_table;
- }
-
- cooling_ops = &cpufreq_power_cooling_ops;
- } else {
- cooling_ops = &cpufreq_cooling_ops;
+ cpufreq_cdev->max_level = i - 1;
+
+ cpufreq_cdev->freq_table = kmalloc_array(i,
+ sizeof(*cpufreq_cdev->freq_table),
+ GFP_KERNEL);
+ if (!cpufreq_cdev->freq_table) {
+ cdev = ERR_PTR(-ENOMEM);
+ goto free_idle_time;
}
ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
if (ret < 0) {
- cool_dev = ERR_PTR(ret);
- goto free_power_table;
+ cdev = ERR_PTR(ret);
+ goto free_table;
}
- cpufreq_dev->id = ret;
+ cpufreq_cdev->id = ret;
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+ cpufreq_cdev->id);
/* Fill freq-table in descending order of frequencies */
- for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
- freq = find_next_max(table, freq);
- cpufreq_dev->freq_table[i] = freq;
+ for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
+ freq = find_next_max(policy->freq_table, freq);
+ cpufreq_cdev->freq_table[i].frequency = freq;
/* Warn for duplicate entries */
if (!freq)
@@ -868,51 +754,54 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
- cpufreq_dev->id);
+ if (capacitance) {
+ cpufreq_cdev->plat_get_static_power = plat_static_func;
+
+ ret = update_freq_table(cpufreq_cdev, capacitance);
+ if (ret) {
+ cdev = ERR_PTR(ret);
+ goto remove_ida;
+ }
+
+ cooling_ops = &cpufreq_power_cooling_ops;
+ } else {
+ cooling_ops = &cpufreq_cooling_ops;
+ }
- cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
- cooling_ops);
- if (IS_ERR(cool_dev))
+ cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
+ cooling_ops);
+ if (IS_ERR(cdev))
goto remove_ida;
- cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
- cpufreq_dev->cool_dev = cool_dev;
+ cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
+ cpufreq_cdev->cdev = cdev;
mutex_lock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
- first = list_empty(&cpufreq_dev_list);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ first = list_empty(&cpufreq_cdev_list);
+ list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
mutex_unlock(&cooling_list_lock);
if (first)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- goto put_policy;
+ return cdev;
remove_ida:
- ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
-free_power_table:
- kfree(cpufreq_dev->dyn_power_table);
+ ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
- kfree(cpufreq_dev->freq_table);
-free_time_in_idle_timestamp:
- kfree(cpufreq_dev->time_in_idle_timestamp);
-free_time_in_idle:
- kfree(cpufreq_dev->time_in_idle);
+ kfree(cpufreq_cdev->freq_table);
+free_idle_time:
+ kfree(cpufreq_cdev->idle_time);
free_cdev:
- kfree(cpufreq_dev);
-put_policy:
- cpufreq_cpu_put(policy);
-free_cpumask:
- free_cpumask_var(temp_mask);
- return cool_dev;
+ kfree(cpufreq_cdev);
+ return cdev;
}
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -922,16 +811,16 @@ free_cpumask:
* on failure, it returns a corresponding ERR_PTR().
*/
struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus)
+cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+ return __cpufreq_cooling_register(NULL, policy, 0, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
* @np: a valid struct device_node to the cooling device device tree node
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -943,18 +832,18 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
*/
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
{
if (!np)
return ERR_PTR(-EINVAL);
- return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+ return __cpufreq_cooling_register(np, policy, 0, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
* cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
* cpus (optional)
@@ -974,10 +863,10 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
* on failure, it returns a corresponding ERR_PTR().
*/
struct thermal_cooling_device *
-cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
+cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
get_static_t plat_static_func)
{
- return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
+ return __cpufreq_cooling_register(NULL, policy, capacitance,
plat_static_func);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);
@@ -985,7 +874,7 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register);
/**
* of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
* @np: a valid struct device_node to the cooling device device tree node
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
* cpus (optional)
@@ -1007,14 +896,14 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register);
*/
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
if (!np)
return ERR_PTR(-EINVAL);
- return __cpufreq_cooling_register(np, clip_cpus, capacitance,
+ return __cpufreq_cooling_register(np, policy, capacitance,
plat_static_func);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
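The device-tree power variant follows the same pattern; a hedged sketch, assuming @np comes from the policy's CPU node and the optional static-power callback is left NULL (the kernel-doc above marks it optional):

    #include <linux/cpufreq.h>
    #include <linux/cpu_cooling.h>
    #include <linux/of.h>

    static struct thermal_cooling_device *
    example_power_cooling(struct cpufreq_policy *policy, u32 capacitance)
    {
            struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
            struct thermal_cooling_device *cdev;

            cdev = of_cpufreq_power_cooling_register(np, policy, capacitance, NULL);
            of_node_put(np); /* safe on NULL; registration fails with -EINVAL then */
            return cdev;
    }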
@@ -1027,30 +916,28 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- struct cpufreq_cooling_device *cpufreq_dev;
+ struct cpufreq_cooling_device *cpufreq_cdev;
bool last;
if (!cdev)
return;
- cpufreq_dev = cdev->devdata;
+ cpufreq_cdev = cdev->devdata;
mutex_lock(&cooling_list_lock);
- list_del(&cpufreq_dev->node);
+ list_del(&cpufreq_cdev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- last = list_empty(&cpufreq_dev_list);
+ last = list_empty(&cpufreq_cdev_list);
mutex_unlock(&cooling_list_lock);
if (last)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
- ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
- kfree(cpufreq_dev->dyn_power_table);
- kfree(cpufreq_dev->time_in_idle_timestamp);
- kfree(cpufreq_dev->time_in_idle);
- kfree(cpufreq_dev->freq_table);
- kfree(cpufreq_dev);
+ thermal_cooling_device_unregister(cpufreq_cdev->cdev);
+ ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
+ kfree(cpufreq_cdev->idle_time);
+ kfree(cpufreq_cdev->freq_table);
+ kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 68bd1b569118..d3469fbc5207 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -71,6 +71,7 @@ static long get_target_state(struct thermal_zone_device *tz,
/**
* fair_share_throttle - throttles devices associated with the given zone
* @tz - thermal_zone_device
+ * @trip - trip point index
*
* Throttling Logic: This uses three parameters to calculate the new
* throttle state of the cooling devices associated with the given zone.
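The hunk above only touches the kernel-doc, but the weighting it documents is easy to misread. A simplified illustration (not fair_share.c's exact formula; all names are for exposition) of splitting a zone's cooling demand across devices in proportion to their weights:

    /* Illustration only: scale each device's target state by its weight. */
    static unsigned long example_fair_target(unsigned long cur_trip_level,
                                             unsigned long max_trip_level,
                                             unsigned int weight,
                                             unsigned int total_weight,
                                             unsigned long max_state)
    {
            if (!total_weight || !max_trip_level)
                    return 0;

            return (weight * cur_trip_level * max_state) /
                   (total_weight * max_trip_level);
    }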
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index f6429666a1cf..9c3ce341eb97 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -397,8 +397,11 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int ret;
- clk_prepare_enable(data->clk);
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
data->irq_enabled = true;
hisi_thermal_enable_bind_irq_sensor(data);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index fb648a45754e..4798b4b1fd77 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -88,6 +89,7 @@ static struct thermal_soc_data thermal_imx6sx_data = {
};
struct imx_thermal_data {
+ struct cpufreq_policy *policy;
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
enum thermal_device_mode mode;
@@ -525,13 +527,18 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
- data->cdev = cpufreq_cooling_register(cpu_present_mask);
+ data->policy = cpufreq_cpu_get(0);
+ if (!data->policy) {
+ pr_debug("%s: CPUFreq policy not found\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ data->cdev = cpufreq_cooling_register(data->policy);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to register cpufreq cooling device: %d\n",
- ret);
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n", ret);
+ cpufreq_cpu_put(data->policy);
return ret;
}
@@ -542,6 +549,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"failed to get thermal clk: %d\n", ret);
cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
return ret;
}
@@ -556,6 +564,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
return ret;
}
@@ -571,6 +580,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
"failed to register thermal zone device %d\n", ret);
clk_disable_unprepare(data->thermal_clk);
cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
return ret;
}
@@ -599,6 +609,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
clk_disable_unprepare(data->thermal_clk);
thermal_zone_device_unregister(data->tz);
cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
return ret;
}
@@ -620,6 +631,7 @@ static int imx_thermal_remove(struct platform_device *pdev)
thermal_zone_device_unregister(data->tz);
cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
return 0;
}
@@ -648,8 +660,11 @@ static int imx_thermal_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
+ int ret;
- clk_prepare_enable(data->thermal_clk);
+ ret = clk_prepare_enable(data->thermal_clk);
+ if (ret)
+ return ret;
/* Enabled thermal sensor after resume */
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index 2c2ec7666eb1..51ceb80212a7 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -62,8 +62,8 @@ static int acpi_thermal_rel_release(struct inode *inode, struct file *file)
* acpi_parse_trt - Thermal Relationship Table _TRT for passive cooling
*
* @handle: ACPI handle of the device contains _TRT
- * @art_count: the number of valid entries resulted from parsing _TRT
- * @artp: pointer to pointer of array of art entries in parsing result
- * @trt_count: the number of valid entries resulting from parsing _TRT
- * @trtp: pointer to a pointer to the array of _TRT entries produced by parsing
* @create_dev: whether to create platform devices for target and source
*
*/
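Per the corrected kernel-doc, a hedged sketch of a caller; @adev is an assumed struct acpi_device pointer and error handling is elided:

    static int example_parse(struct acpi_device *adev)
    {
            struct trt *trts;
            int trt_count;
            int ret;

            ret = acpi_parse_trt(adev->handle, &trt_count, &trts, false);
            if (!ret)
                    pr_debug("parsed %d _TRT entries\n", trt_count);
            return ret;
    }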
@@ -208,7 +208,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (art->target) {
result = acpi_bus_get_device(art->target, &adev);
if (result)
- pr_warn("Failed to get source ACPI device\n");
+ pr_warn("Failed to get target ACPI device\n");
}
}
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index c4890c9437eb..8a7f24dd9315 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -238,8 +238,16 @@ static int int3403_add(struct platform_device *pdev)
status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
NULL, &priv->type);
if (ACPI_FAILURE(status)) {
- result = -EINVAL;
- goto err;
+ unsigned long long tmp;
+
+ status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ result = -EINVAL;
+ goto err;
+ } else {
+ priv->type = INT3403_TYPE_SENSOR;
+ }
}
platform_set_drvdata(pdev, priv);
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index bcef2e7c4ec9..be95826631b7 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -186,8 +186,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* step_wise_throttle - throttles devices associated with the given zone
* @tz - thermal_zone_device
- * @trip - the trip point
- * @trip_type - type of the trip point
+ * @trip - trip point index
*
* Throttling Logic: This uses the trend of the thermal zone to throttle.
* If the thermal zone is 'heating up' this throttles all the cooling
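In sketch form, the trend-driven stepping the comment describes (the in-tree helper handles more cases, e.g. throttling hysteresis and instance limits):

    static unsigned long example_next_state(enum thermal_trend trend,
                                            unsigned long cur_state)
    {
            switch (trend) {
            case THERMAL_TREND_RAISING:
                    return cur_state + 1;                 /* heating up: throttle harder */
            case THERMAL_TREND_DROPPING:
                    return cur_state ? cur_state - 1 : 0; /* cooling down: relax */
            default:
                    return cur_state;                     /* stable: hold */
            }
    }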
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 02790f69e26c..c211a8e4a210 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
+#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
@@ -37,6 +38,7 @@
/* common data structures */
struct ti_thermal_data {
+ struct cpufreq_policy *policy;
struct thermal_zone_device *ti_thermal;
struct thermal_zone_device *pcb_tz;
struct thermal_cooling_device *cool_dev;
@@ -247,15 +249,19 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
if (!data)
return -EINVAL;
+ data->policy = cpufreq_cpu_get(0);
+ if (!data->policy) {
+ pr_debug("%s: CPUFreq policy not found\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
/* Register cooling device */
- data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
+ data->cool_dev = cpufreq_cooling_register(data->policy);
if (IS_ERR(data->cool_dev)) {
int ret = PTR_ERR(data->cool_dev);
-
- if (ret != -EPROBE_DEFER)
- dev_err(bgp->dev,
- "Failed to register cpu cooling device %d\n",
- ret);
+ dev_err(bgp->dev, "Failed to register cpu cooling device %d\n",
+ ret);
+ cpufreq_cpu_put(data->policy);
return ret;
}
@@ -270,8 +276,10 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
- if (data)
+ if (data) {
cpufreq_cooling_unregister(data->cool_dev);
+ cpufreq_cpu_put(data->policy);
+ }
return 0;
}
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index c908150c268d..8e92a06ef48a 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -24,12 +24,13 @@
#include <linux/thermal.h>
#include <linux/slab.h>
+
#include "thermal_core.h"
/**
* notify_user_space - Notifies user space about thermal events
* @tz - thermal_zone_device
- * @trip - Trip point index
+ * @trip - trip point index
*
* This function notifies the user space through UEvents.
*/
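For reference, the notification boils down to a uevent on the zone's device; a minimal sketch with an illustrative payload (the governor's actual environment strings may differ):

    static void example_notify(struct thermal_zone_device *tz, int trip)
    {
            char trip_env[32];
            char *envp[] = { trip_env, NULL };

            snprintf(trip_env, sizeof(trip_env), "TRIP=%d", trip);
            kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
    }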
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index 2a61dd6b4009..906ee770ff4a 100644
--- a/drivers/tty/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -377,7 +377,7 @@ static struct ioc3_port *get_ioc3_port(struct uart_port *the_port)
* called per port from attach...
* @port: port to initialize
*/
-static int inline port_init(struct ioc3_port *port)
+static inline int port_init(struct ioc3_port *port)
{
uint32_t sio_cr;
struct port_hooks *hooks = port->ip_hooks;
@@ -1430,7 +1430,7 @@ static int receive_chars(struct uart_port *the_port)
* @pending: interrupts to handle
*/
-static int inline
+static inline int
ioc3uart_intr_one(struct ioc3_submodule *is,
struct ioc3_driver_data *idd,
unsigned int pending)
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index f96bcf9bee25..43d7d32eb150 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -824,7 +824,7 @@ pending_intrs(struct ioc4_soft *soft, int type)
* called per port from attach...
* @port: port to initialize
*/
-static int inline port_init(struct ioc4_port *port)
+static inline int port_init(struct ioc4_port *port)
{
uint32_t sio_cr;
struct hooks *hooks = port->ip_hooks;
@@ -1048,7 +1048,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
* IOC4 with serial ports in the system.
* @idd: Master module data for this IOC4
*/
-static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
+static inline int ioc4_attach_local(struct ioc4_driver_data *idd)
{
struct ioc4_port *port;
struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS];
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 8a069aa154ed..27d7a7016298 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -180,7 +180,7 @@ static const __u16 crc10_table[256] = {
* Perform a memcpy and calculate fcs using ppp 10bit CRC algorithm. Return
* new 10 bit FCS.
*/
-static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs)
+static inline __u16 fcs_compute10(unsigned char *sp, int len, __u16 fcs)
{
for (; len-- > 0; fcs = CRC10_FCS(fcs, *sp++));
return fcs;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 324c52e3a1a4..063c1ce6fa42 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -195,11 +195,11 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
switch (pdev->vendor) {
case PCI_VENDOR_ID_INTEL:
switch (pdev->device) {
- /* All i40e (XL710/X710) 10/20/40GbE NICs */
+ /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
case 0x1572:
case 0x1574:
case 0x1580 ... 0x1581:
- case 0x1583 ... 0x1589:
+ case 0x1583 ... 0x158b:
case 0x37d0 ... 0x37d2:
return true;
default:
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 561084ab387f..330d50582f40 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -382,7 +382,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
vfio_group_unlock_and_free(group);
- return (struct vfio_group *)dev; /* ERR_PTR */
+ return ERR_CAST(dev);
}
group->minor = minor;
@@ -423,6 +423,34 @@ static void vfio_group_put(struct vfio_group *group)
kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}
+struct vfio_group_put_work {
+ struct work_struct work;
+ struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = container_of(work, struct vfio_group_put_work, work);
+
+ vfio_group_put(do_work->group);
+ kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+ if (WARN_ON(!do_work))
+ return;
+
+ INIT_WORK(&do_work->work, vfio_group_put_bg);
+ do_work->group = group;
+ schedule_work(&do_work->work);
+}
+
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
@@ -762,7 +790,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
break;
}
- vfio_group_put(group);
+ /*
+ * If we're the last reference to the group, the group will be
+ * released, which includes unregistering the iommu group notifier.
+ * We hold a read-lock on that notifier list, unregistering needs
+ * a write-lock... deadlock. Release our reference asynchronously
+ * to avoid that situation.
+ */
+ vfio_group_schedule_put(group);
return NOTIFY_OK;
}
@@ -1140,15 +1175,11 @@ static long vfio_fops_unl_ioctl(struct file *filep,
ret = vfio_ioctl_set_iommu(container, arg);
break;
default:
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
data = container->iommu_data;
if (driver) /* passthrough all unrecognized ioctls */
ret = driver->ops->ioctl(data, cmd, arg);
-
- up_read(&container->group_lock);
}
return ret;
@@ -1202,15 +1233,11 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
struct vfio_iommu_driver *driver;
ssize_t ret = -EINVAL;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->read))
ret = driver->ops->read(container->iommu_data,
buf, count, ppos);
- up_read(&container->group_lock);
-
return ret;
}
@@ -1221,15 +1248,11 @@ static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
struct vfio_iommu_driver *driver;
ssize_t ret = -EINVAL;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->write))
ret = driver->ops->write(container->iommu_data,
buf, count, ppos);
- up_read(&container->group_lock);
-
return ret;
}
@@ -1239,14 +1262,10 @@ static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
struct vfio_iommu_driver *driver;
int ret = -EINVAL;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->mmap))
ret = driver->ops->mmap(container->iommu_data, vma);
- up_read(&container->group_lock);
-
return ret;
}
@@ -1741,6 +1760,15 @@ void vfio_group_put_external_user(struct vfio_group *group)
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+ struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
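A hedged sketch of how an external user such as KVM might apply the new helper to validate a userspace-supplied group fd; the wrapper itself is hypothetical:

    #include <linux/file.h>

    static bool example_same_group(struct vfio_group *group, int fd)
    {
            struct fd f = fdget(fd);
            bool match = false;

            if (f.file)
                    match = vfio_external_group_match_file(group, f.file);
            fdput(f);
            return match;
    }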
int vfio_external_user_iommu_id(struct vfio_group *group)
{
return iommu_group_id(group->iommu_group);
@@ -1949,8 +1977,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
goto err_pin_pages;
container = group->container;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
@@ -1958,7 +1984,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
else
ret = -ENOTTY;
- up_read(&container->group_lock);
vfio_group_try_dissolve_container(group);
err_pin_pages:
@@ -1998,8 +2023,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
goto err_unpin_pages;
container = group->container;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->unpin_pages))
ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
@@ -2007,7 +2030,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
else
ret = -ENOTTY;
- up_read(&container->group_lock);
vfio_group_try_dissolve_container(group);
err_unpin_pages:
@@ -2029,8 +2051,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
return -EINVAL;
container = group->container;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->register_notifier))
ret = driver->ops->register_notifier(container->iommu_data,
@@ -2038,7 +2058,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
else
ret = -ENOTTY;
- up_read(&container->group_lock);
vfio_group_try_dissolve_container(group);
return ret;
@@ -2056,8 +2075,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
return -EINVAL;
container = group->container;
- down_read(&container->group_lock);
-
driver = container->iommu_driver;
if (likely(driver && driver->ops->unregister_notifier))
ret = driver->ops->unregister_notifier(container->iommu_data,
@@ -2065,7 +2082,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
else
ret = -ENOTTY;
- up_read(&container->group_lock);
vfio_group_try_dissolve_container(group);
return ret;
@@ -2083,7 +2099,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
unsigned long *events,
struct notifier_block *nb)
{
- struct vfio_container *container;
int ret;
bool set_kvm = false;
@@ -2101,9 +2116,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
if (ret)
return -EINVAL;
- container = group->container;
- down_read(&container->group_lock);
-
ret = blocking_notifier_chain_register(&group->notifier, nb);
/*
@@ -2114,7 +2126,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
blocking_notifier_call_chain(&group->notifier,
VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
- up_read(&container->group_lock);
vfio_group_try_dissolve_container(group);
return ret;
@@ -2123,19 +2134,14 @@ static int vfio_register_group_notifier(struct vfio_group *group,
static int vfio_unregister_group_notifier(struct vfio_group *group,
struct notifier_block *nb)
{
- struct vfio_container *container;
int ret;
ret = vfio_group_add_container_user(group);
if (ret)
return -EINVAL;
- container = group->container;
- down_read(&container->group_lock);
-
ret = blocking_notifier_chain_unregister(&group->notifier, nb);
- up_read(&container->group_lock);
vfio_group_try_dissolve_container(group);
return ret;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e3d7ea1288c6..06d044862e58 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -897,7 +897,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct sk_buff **queue;
int i;
- n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT);
+ n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!n)
return -ENOMEM;
vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index fd6c8b66f06f..046f6d280af5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -496,14 +496,12 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_event_work);
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
- struct vhost_scsi_evt *evt;
+ struct vhost_scsi_evt *evt, *t;
struct llist_node *llnode;
mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list);
- while (llnode) {
- evt = llist_entry(llnode, struct vhost_scsi_evt, list);
- llnode = llist_next(llnode);
+ llist_for_each_entry_safe(evt, t, llnode, list) {
vhost_scsi_do_evt_work(vs, evt);
vhost_scsi_free_evt(vs, evt);
}
@@ -529,10 +527,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
llnode = llist_del_all(&vs->vs_completion_list);
- while (llnode) {
- cmd = llist_entry(llnode, struct vhost_scsi_cmd,
- tvc_completion_list);
- llnode = llist_next(llnode);
+ llist_for_each_entry(cmd, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
@@ -1404,7 +1399,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
struct vhost_virtqueue **vqs;
int r = -ENOMEM, i;
- vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
if (!vs) {
vs = vzalloc(sizeof(*vs));
if (!vs)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 3f63e03de8e8..c9de9c41aa97 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -508,7 +508,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
/* This struct is large and allocation could fail, fall back to vmalloc
* if there is no other way.
*/
- vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT);
+ vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!vsock)
return -ENOMEM;
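Beyond the flag rename, the pattern is worth spelling out: kvmalloc() tries kmalloc first and falls back to vmalloc, and __GFP_RETRY_MAYFAIL (the successor to the removed __GFP_REPEAT) asks the page allocator to try hard but return NULL instead of triggering the OOM killer. A sketch with a hypothetical structure:

    #include <linux/mm.h>

    struct big_state { char buf[1 << 16]; }; /* hypothetical, large */

    static struct big_state *example_alloc(void)
    {
            /* Try kmalloc hard (fail rather than OOM-kill), else vmalloc. */
            return kvmalloc(sizeof(struct big_state),
                            GFP_KERNEL | __GFP_RETRY_MAYFAIL);
            /* free with kvfree() */
    }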
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index ec192a1bf297..d0d427a2f1a3 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(mda_lock);
/* description of the hardware layout */
-static unsigned long mda_vram_base; /* Base of video memory */
+static u16 *mda_vram_base; /* Base of video memory */
static unsigned long mda_vram_len; /* Size of video memory */
static unsigned int mda_num_columns; /* Number of text columns */
static unsigned int mda_num_lines; /* Number of text lines */
@@ -205,13 +205,20 @@ static int mda_detect(void)
/* do a memory check */
- p = (u16 *) mda_vram_base;
- q = (u16 *) (mda_vram_base + 0x01000);
+ p = mda_vram_base;
+ q = mda_vram_base + 0x01000 / 2;
- p_save = scr_readw(p); q_save = scr_readw(q);
+ p_save = scr_readw(p);
+ q_save = scr_readw(q);
+
+ scr_writew(0xAA55, p);
+ if (scr_readw(p) == 0xAA55)
+ count++;
+
+ scr_writew(0x55AA, p);
+ if (scr_readw(p) == 0x55AA)
+ count++;
- scr_writew(0xAA55, p); if (scr_readw(p) == 0xAA55) count++;
- scr_writew(0x55AA, p); if (scr_readw(p) == 0x55AA) count++;
scr_writew(p_save, p);
if (count != 2) {
@@ -220,13 +227,18 @@ static int mda_detect(void)
/* check if we have 4K or 8K */
- scr_writew(0xA55A, q); scr_writew(0x0000, p);
- if (scr_readw(q) == 0xA55A) count++;
+ scr_writew(0xA55A, q);
+ scr_writew(0x0000, p);
+ if (scr_readw(q) == 0xA55A)
+ count++;
- scr_writew(0x5AA5, q); scr_writew(0x0000, p);
- if (scr_readw(q) == 0x5AA5) count++;
+ scr_writew(0x5AA5, q);
+ scr_writew(0x0000, p);
+ if (scr_readw(q) == 0x5AA5)
+ count++;
- scr_writew(p_save, p); scr_writew(q_save, q);
+ scr_writew(p_save, p);
+ scr_writew(q_save, q);
if (count == 4) {
mda_vram_len = 0x02000;
@@ -240,14 +252,12 @@ static int mda_detect(void)
/* Edward: These two `tests' mess up my cursor on bootup */
/* cursor low register */
- if (! test_mda_b(0x66, 0x0f)) {
+ if (!test_mda_b(0x66, 0x0f))
return 0;
- }
/* cursor low register */
- if (! test_mda_b(0x99, 0x0f)) {
+ if (!test_mda_b(0x99, 0x0f))
return 0;
- }
#endif
/* See if the card is a Hercules, by checking whether the vsync
@@ -257,25 +267,25 @@ static int mda_detect(void)
p_save = q_save = inb_p(mda_status_port) & MDA_STATUS_VSYNC;
- for (count=0; count < 50000 && p_save == q_save; count++) {
+ for (count = 0; count < 50000 && p_save == q_save; count++) {
q_save = inb(mda_status_port) & MDA_STATUS_VSYNC;
udelay(2);
}
if (p_save != q_save) {
switch (inb_p(mda_status_port) & 0x70) {
- case 0x10:
- mda_type = TYPE_HERCPLUS;
- mda_type_name = "HerculesPlus";
- break;
- case 0x50:
- mda_type = TYPE_HERCCOLOR;
- mda_type_name = "HerculesColor";
- break;
- default:
- mda_type = TYPE_HERC;
- mda_type_name = "Hercules";
- break;
+ case 0x10:
+ mda_type = TYPE_HERCPLUS;
+ mda_type_name = "HerculesPlus";
+ break;
+ case 0x50:
+ mda_type = TYPE_HERCCOLOR;
+ mda_type_name = "HerculesColor";
+ break;
+ default:
+ mda_type = TYPE_HERC;
+ mda_type_name = "Hercules";
+ break;
}
}
@@ -313,7 +323,7 @@ static const char *mdacon_startup(void)
mda_num_lines = 25;
mda_vram_len = 0x01000;
- mda_vram_base = VGA_MAP_MEM(0xb0000, mda_vram_len);
+ mda_vram_base = (u16 *)VGA_MAP_MEM(0xb0000, mda_vram_len);
mda_index_port = 0x3b4;
mda_value_port = 0x3b5;
@@ -410,17 +420,20 @@ static void mdacon_invert_region(struct vc_data *c, u16 *p, int count)
}
}
-#define MDA_ADDR(x,y) ((u16 *) mda_vram_base + (y)*mda_num_columns + (x))
+static inline u16 *mda_addr(unsigned int x, unsigned int y)
+{
+ return mda_vram_base + y * mda_num_columns + x;
+}
static void mdacon_putc(struct vc_data *c, int ch, int y, int x)
{
- scr_writew(mda_convert_attr(ch), MDA_ADDR(x, y));
+ scr_writew(mda_convert_attr(ch), mda_addr(x, y));
}
static void mdacon_putcs(struct vc_data *c, const unsigned short *s,
int count, int y, int x)
{
- u16 *dest = MDA_ADDR(x, y);
+ u16 *dest = mda_addr(x, y);
for (; count > 0; count--) {
scr_writew(mda_convert_attr(scr_readw(s++)), dest++);
@@ -430,7 +443,7 @@ static void mdacon_putcs(struct vc_data *c, const unsigned short *s,
static void mdacon_clear(struct vc_data *c, int y, int x,
int height, int width)
{
- u16 *dest = MDA_ADDR(x, y);
+ u16 *dest = mda_addr(x, y);
u16 eattr = mda_convert_attr(c->vc_video_erase_char);
if (width <= 0 || height <= 0)
@@ -453,7 +466,7 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
{
if (mda_type == TYPE_MDA) {
if (blank)
- scr_memsetw((void *)mda_vram_base,
+ scr_memsetw(mda_vram_base,
mda_convert_attr(c->vc_video_erase_char),
c->vc_screenbuf_size);
/* Tell console.c that it has to restore the screen itself */
@@ -502,16 +515,16 @@ static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
switch (dir) {
case SM_UP:
- scr_memmovew(MDA_ADDR(0,t), MDA_ADDR(0,t+lines),
+ scr_memmovew(mda_addr(0, t), mda_addr(0, t + lines),
(b-t-lines)*mda_num_columns*2);
- scr_memsetw(MDA_ADDR(0,b-lines), eattr,
+ scr_memsetw(mda_addr(0, b - lines), eattr,
lines*mda_num_columns*2);
break;
case SM_DOWN:
- scr_memmovew(MDA_ADDR(0,t+lines), MDA_ADDR(0,t),
+ scr_memmovew(mda_addr(0, t + lines), mda_addr(0, t),
(b-t-lines)*mda_num_columns*2);
- scr_memsetw(MDA_ADDR(0,t), eattr, lines*mda_num_columns*2);
+ scr_memsetw(mda_addr(0, t), eattr, lines*mda_num_columns*2);
break;
}
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 11026e726b68..b55fdac9c9f5 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -802,7 +802,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp;
- u32 sync, vmode, vdisplay;
+ u32 sync, vmode;
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
@@ -984,12 +984,6 @@ static int aty_var_to_crtc(const struct fb_info *info,
v_total <<= 1;
}
- vdisplay = yres;
-#ifdef CONFIG_FB_ATY_GENERIC_LCD
- if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON))
- vdisplay = par->lcd_height;
-#endif
-
v_disp--;
v_sync_strt--;
v_sync_end--;
@@ -1036,7 +1030,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->gen_cntl |= CRTC_INTERLACE_EN;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
- vdisplay = yres;
+ u32 vdisplay = yres;
if (vmode & FB_VMODE_DOUBLE)
vdisplay <<= 1;
crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 5324358f110f..7a42238db446 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1483,7 +1483,7 @@ __releases(&info->lock)
return 0;
}
-#ifdef CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA
+#if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
unsigned long get_fb_unmapped_area(struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
@@ -1510,7 +1510,8 @@ static const struct file_operations fb_fops = {
.open = fb_open,
.release = fb_release,
#if defined(HAVE_ARCH_FB_UNMAPPED_AREA) || \
- defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA)
+ (defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && \
+ !defined(CONFIG_MMU))
.get_unmapped_area = get_fb_unmapped_area,
#endif
#ifdef CONFIG_FB_DEFERRED_IO
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index ca3d6b366471..25abbcf38913 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -388,7 +388,7 @@ struct fsl_diu_data {
/* Determine the DMA address of a member of the fsl_diu_data structure */
#define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))
-static struct mfb_info mfb_template[] = {
+static const struct mfb_info mfb_template[] = {
{
.index = PLANE0,
.id = "Panel0",
@@ -1868,7 +1868,7 @@ static int __init fsl_diu_setup(char *options)
}
#endif
-static struct of_device_id fsl_diu_match[] = {
+static const struct of_device_id fsl_diu_match[] = {
#ifdef CONFIG_PPC_MPC512x
{
.compatible = "fsl,mpc5121-diu",
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index 6b444400a86c..ffc391208b27 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -907,7 +907,7 @@ static void intelfb_pci_unregister(struct pci_dev *pdev)
* helper functions *
***************************************************************/
-int __inline__ intelfb_var_to_depth(const struct fb_var_screeninfo *var)
+__inline__ int intelfb_var_to_depth(const struct fb_var_screeninfo *var)
{
DBG_MSG("intelfb_var_to_depth: bpp: %d, green.length is %d\n",
var->bits_per_pixel, var->green.length);
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 11eb094396ae..f6a0b9af97a9 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2001,7 +2001,7 @@ static void matroxfb_register_device(struct matrox_fb_info* minfo) {
for (drv = matroxfb_driver_l(matroxfb_driver_list.next);
drv != matroxfb_driver_l(&matroxfb_driver_list);
drv = matroxfb_driver_l(drv->node.next)) {
- if (drv && drv->probe) {
+ if (drv->probe) {
void *p = drv->probe(minfo);
if (p) {
minfo->drivers_data[i] = p;
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index e3d9b9ea5498..938cba0d24ae 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -79,12 +79,12 @@ static struct omap_lcd_controller {
unsigned long vram_size;
} lcdc;
-static void inline enable_irqs(int mask)
+static inline void enable_irqs(int mask)
{
lcdc.irq_mask |= mask;
}
-static void inline disable_irqs(int mask)
+static inline void disable_irqs(int mask)
{
lcdc.irq_mask &= ~mask;
}
@@ -466,7 +466,7 @@ static void calc_ck_div(int is_tft, int pck, int *pck_div)
}
}
-static void inline setup_regs(void)
+static inline void setup_regs(void)
{
u32 l;
struct lcd_panel *panel = lcdc.fbdev->panel;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index f4cbfb3b8a09..3479a47a3082 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -62,7 +62,7 @@ struct caps_table_struct {
const char *name;
};
-static struct caps_table_struct ctrl_caps[] = {
+static const struct caps_table_struct ctrl_caps[] = {
{ OMAPFB_CAPS_MANUAL_UPDATE, "manual update" },
{ OMAPFB_CAPS_TEARSYNC, "tearing synchronization" },
{ OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" },
@@ -74,7 +74,7 @@ static struct caps_table_struct ctrl_caps[] = {
{ OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" },
};
-static struct caps_table_struct color_caps[] = {
+static const struct caps_table_struct color_caps[] = {
{ 1 << OMAPFB_COLOR_RGB565, "RGB565", },
{ 1 << OMAPFB_COLOR_YUV422, "YUV422", },
{ 1 << OMAPFB_COLOR_YUV420, "YUV420", },
@@ -1384,7 +1384,7 @@ static struct attribute *panel_attrs[] = {
NULL,
};
-static struct attribute_group panel_attr_grp = {
+static const struct attribute_group panel_attr_grp = {
.name = "panel",
.attrs = panel_attrs,
};
@@ -1406,7 +1406,7 @@ static struct attribute *ctrl_attrs[] = {
NULL,
};
-static struct attribute_group ctrl_attr_grp = {
+static const struct attribute_group ctrl_attr_grp = {
.name = "ctrl",
.attrs = ctrl_attrs,
};
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index fd2b372d0264..bef431530090 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -100,7 +100,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata)
{
unsigned long wait = ddata->hw_guard_end - jiffies;
- if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+ if ((long)wait > 0 && time_before_eq(wait, ddata->hw_guard_wait)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(wait);
}
@@ -559,7 +559,7 @@ static struct attribute *dsicm_attrs[] = {
NULL,
};
-static struct attribute_group dsicm_attr_group = {
+static const struct attribute_group dsicm_attr_group = {
.attrs = dsicm_attrs,
};
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index 9e2a67fdf4d2..44b96af4ef4e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -182,22 +182,16 @@ static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
const char *buf, size_t size)
{
- enum omap_dss_trans_key_type key_type;
struct omap_overlay_manager_info info;
int r;
- for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
- key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
- if (sysfs_streq(buf, trans_key_type_str[key_type]))
- break;
- }
-
- if (key_type == ARRAY_SIZE(trans_key_type_str))
- return -EINVAL;
+ r = sysfs_match_string(trans_key_type_str, buf);
+ if (r < 0)
+ return r;
mgr->get_manager_info(mgr, &info);
- info.trans_key_type = key_type;
+ info.trans_key_type = r;
r = mgr->set_manager_info(mgr, &info);
if (r)
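sysfs_match_string() returns the index of the matching array entry or a negative errno, which is what lets the open-coded scan above collapse; a minimal sketch with hypothetical names:

    static const char * const example_modes[] = { "off", "on", "auto" };

    static ssize_t example_store(const char *buf)
    {
            int i = sysfs_match_string(example_modes, buf);

            if (i < 0)
                    return i;       /* -EINVAL when nothing matches */
            /* i now indexes example_modes; apply the setting */
            return 0;
    }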
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index b21a89b03fb4..c3d49e13643c 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1436,7 +1436,10 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3);
/* enable LCD controller clock */
- clk_prepare_enable(fbi->clk);
+ if (clk_prepare_enable(fbi->clk)) {
+ pr_err("%s: Failed to prepare clock\n", __func__);
+ return;
+ }
if (fbi->lccr0 & LCCR0_LCDT)
return;
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 885ee3a563aa..c3a46506e47e 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2301,7 +2301,7 @@ static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev,
return (info->bl_dev == bdev);
}
-static struct backlight_ops sh_mobile_lcdc_bl_ops = {
+static const struct backlight_ops sh_mobile_lcdc_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = sh_mobile_lcdc_update_bl,
.get_brightness = sh_mobile_lcdc_get_brightness,
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 98af9e02959b..dc0e8d90d9cc 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -5,6 +5,9 @@
* Loosely based upon the vesafb driver.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -149,8 +152,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
* allowed by connector.
*/
if (sizeof(*m) + len > CONNECTOR_MAX_MSG_SIZE) {
- printk(KERN_WARNING "uvesafb: message too long (%d), "
- "can't execute task\n", (int)(sizeof(*m) + len));
+ pr_warn("message too long (%d), can't execute task\n",
+ (int)(sizeof(*m) + len));
return -E2BIG;
}
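The pr_fmt() definition added at the top of the file is what allows every pr_warn()/pr_err() below to drop the hand-written "uvesafb: " prefix; in sketch form:

    /* Must be defined before printk.h is pulled in to take effect. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/printk.h>

    static void example(int len)
    {
            /* Expands to: printk(KERN_WARNING "uvesafb: message too long ...") */
            pr_warn("message too long (%d), can't execute task\n", len);
    }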
@@ -198,10 +201,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
*/
err = uvesafb_helper_start();
if (err) {
- printk(KERN_ERR "uvesafb: failed to execute %s\n",
- v86d_path);
- printk(KERN_ERR "uvesafb: make sure that the v86d "
- "helper is installed and executable\n");
+ pr_err("failed to execute %s\n", v86d_path);
+ pr_err("make sure that the v86d helper is installed and executable\n");
} else {
v86d_started = 1;
err = cn_netlink_send(m, 0, 0, gfp_any());
@@ -375,9 +376,8 @@ static u8 *uvesafb_vbe_state_save(struct uvesafb_par *par)
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
- printk(KERN_WARNING "uvesafb: VBE get state call "
- "failed (eax=0x%x, err=%d)\n",
- task->t.regs.eax, err);
+ pr_warn("VBE get state call failed (eax=0x%x, err=%d)\n",
+ task->t.regs.eax, err);
kfree(state);
state = NULL;
}
@@ -407,9 +407,8 @@ static void uvesafb_vbe_state_restore(struct uvesafb_par *par, u8 *state_buf)
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
- printk(KERN_WARNING "uvesafb: VBE state restore call "
- "failed (eax=0x%x, err=%d)\n",
- task->t.regs.eax, err);
+ pr_warn("VBE state restore call failed (eax=0x%x, err=%d)\n",
+ task->t.regs.eax, err);
uvesafb_free(task);
}
@@ -427,24 +426,22 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
- printk(KERN_ERR "uvesafb: Getting VBE info block failed "
- "(eax=0x%x, err=%d)\n", (u32)task->t.regs.eax,
- err);
+ pr_err("Getting VBE info block failed (eax=0x%x, err=%d)\n",
+ (u32)task->t.regs.eax, err);
return -EINVAL;
}
if (par->vbe_ib.vbe_version < 0x0200) {
- printk(KERN_ERR "uvesafb: Sorry, pre-VBE 2.0 cards are "
- "not supported.\n");
+ pr_err("Sorry, pre-VBE 2.0 cards are not supported\n");
return -EINVAL;
}
if (!par->vbe_ib.mode_list_ptr) {
- printk(KERN_ERR "uvesafb: Missing mode list!\n");
+ pr_err("Missing mode list!\n");
return -EINVAL;
}
- printk(KERN_INFO "uvesafb: ");
+ pr_info("");
/*
* Convert string pointers and the mode list pointer into
@@ -452,23 +449,24 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
* video adapter and its vendor.
*/
if (par->vbe_ib.oem_vendor_name_ptr)
- printk("%s, ",
+ pr_cont("%s, ",
((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr);
if (par->vbe_ib.oem_product_name_ptr)
- printk("%s, ",
+ pr_cont("%s, ",
((char *)task->buf) + par->vbe_ib.oem_product_name_ptr);
if (par->vbe_ib.oem_product_rev_ptr)
- printk("%s, ",
+ pr_cont("%s, ",
((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr);
if (par->vbe_ib.oem_string_ptr)
- printk("OEM: %s, ",
+ pr_cont("OEM: %s, ",
((char *)task->buf) + par->vbe_ib.oem_string_ptr);
- printk("VBE v%d.%d\n", ((par->vbe_ib.vbe_version & 0xff00) >> 8),
- par->vbe_ib.vbe_version & 0xff);
+ pr_cont("VBE v%d.%d\n",
+ (par->vbe_ib.vbe_version & 0xff00) >> 8,
+ par->vbe_ib.vbe_version & 0xff);
return 0;
}
@@ -507,8 +505,7 @@ static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
- printk(KERN_WARNING "uvesafb: Getting mode info block "
- "for mode 0x%x failed (eax=0x%x, err=%d)\n",
+ pr_warn("Getting mode info block for mode 0x%x failed (eax=0x%x, err=%d)\n",
*mode, (u32)task->t.regs.eax, err);
mode++;
par->vbe_modes_cnt--;
@@ -569,23 +566,20 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
+ task->t.regs.edi);
par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
- printk(KERN_INFO "uvesafb: protected mode interface info at "
- "%04x:%04x\n",
- (u16)task->t.regs.es, (u16)task->t.regs.edi);
- printk(KERN_INFO "uvesafb: pmi: set display start = %p, "
- "set palette = %p\n", par->pmi_start,
- par->pmi_pal);
+ pr_info("protected mode interface info at %04x:%04x\n",
+ (u16)task->t.regs.es, (u16)task->t.regs.edi);
+ pr_info("pmi: set display start = %p, set palette = %p\n",
+ par->pmi_start, par->pmi_pal);
if (par->pmi_base[3]) {
- printk(KERN_INFO "uvesafb: pmi: ports = ");
+ pr_info("pmi: ports =");
for (i = par->pmi_base[3]/2;
par->pmi_base[i] != 0xffff; i++)
- printk("%x ", par->pmi_base[i]);
- printk("\n");
+ pr_cont(" %x", par->pmi_base[i]);
+ pr_cont("\n");
if (par->pmi_base[i] != 0xffff) {
- printk(KERN_INFO "uvesafb: can't handle memory"
- " requests, pmi disabled\n");
+ pr_info("can't handle memory requests, pmi disabled\n");
par->ypan = par->pmi_setpal = 0;
}
}
@@ -634,17 +628,13 @@ static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info)
return -EINVAL;
if ((task->t.regs.ebx & 0x3) == 3) {
- printk(KERN_INFO "uvesafb: VBIOS/hardware supports both "
- "DDC1 and DDC2 transfers\n");
+ pr_info("VBIOS/hardware supports both DDC1 and DDC2 transfers\n");
} else if ((task->t.regs.ebx & 0x3) == 2) {
- printk(KERN_INFO "uvesafb: VBIOS/hardware supports DDC2 "
- "transfers\n");
+ pr_info("VBIOS/hardware supports DDC2 transfers\n");
} else if ((task->t.regs.ebx & 0x3) == 1) {
- printk(KERN_INFO "uvesafb: VBIOS/hardware supports DDC1 "
- "transfers\n");
+ pr_info("VBIOS/hardware supports DDC1 transfers\n");
} else {
- printk(KERN_INFO "uvesafb: VBIOS/hardware doesn't support "
- "DDC transfers\n");
+ pr_info("VBIOS/hardware doesn't support DDC transfers\n");
return -EINVAL;
}
@@ -718,14 +708,12 @@ static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
}
if (info->monspecs.gtf)
- printk(KERN_INFO
- "uvesafb: monitor limits: vf = %d Hz, hf = %d kHz, "
- "clk = %d MHz\n", info->monspecs.vfmax,
+ pr_info("monitor limits: vf = %d Hz, hf = %d kHz, clk = %d MHz\n",
+ info->monspecs.vfmax,
(int)(info->monspecs.hfmax / 1000),
(int)(info->monspecs.dclkmax / 1000000));
else
- printk(KERN_INFO "uvesafb: no monitor limits have been set, "
- "default refresh rate will be used\n");
+ pr_info("no monitor limits have been set, default refresh rate will be used\n");
/* Add VBE modes to the modelist. */
for (i = 0; i < par->vbe_modes_cnt; i++) {
@@ -779,8 +767,7 @@ static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
- printk(KERN_WARNING "uvesafb: VBE state buffer size "
- "cannot be determined (eax=0x%x, err=%d)\n",
+ pr_warn("VBE state buffer size cannot be determined (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
par->vbe_state_size = 0;
return;
@@ -815,8 +802,7 @@ static int uvesafb_vbe_init(struct fb_info *info)
if (par->pmi_setpal || par->ypan) {
if (__supported_pte_mask & _PAGE_NX) {
par->pmi_setpal = par->ypan = 0;
- printk(KERN_WARNING "uvesafb: NX protection is active, "
- "better not use the PMI.\n");
+ pr_warn("NX protection is active, better not use the PMI\n");
} else {
uvesafb_vbe_getpmi(task, par);
}
@@ -859,8 +845,7 @@ static int uvesafb_vbe_init_mode(struct fb_info *info)
goto gotmode;
}
}
- printk(KERN_INFO "uvesafb: requested VBE mode 0x%x is "
- "unavailable\n", vbemode);
+ pr_info("requested VBE mode 0x%x is unavailable\n", vbemode);
vbemode = 0;
}
@@ -1181,8 +1166,8 @@ static int uvesafb_open(struct fb_info *info, int user)
if (!cnt && par->vbe_state_size) {
buf = uvesafb_vbe_state_save(par);
if (IS_ERR(buf)) {
- printk(KERN_WARNING "uvesafb: save hardware state"
- "failed, error code is %ld!\n", PTR_ERR(buf));
+ pr_warn("save hardware state failed, error code is %ld!\n",
+ PTR_ERR(buf));
} else {
par->vbe_state_orig = buf;
}
@@ -1293,17 +1278,16 @@ setmode:
* use our own timings. Try again with the default timings.
*/
if (crtc != NULL) {
- printk(KERN_WARNING "uvesafb: mode switch failed "
- "(eax=0x%x, err=%d). Trying again with "
- "default timings.\n", task->t.regs.eax, err);
+ pr_warn("mode switch failed (eax=0x%x, err=%d) - trying again with default timings\n",
+ task->t.regs.eax, err);
uvesafb_reset(task);
kfree(crtc);
crtc = NULL;
info->var.pixclock = 0;
goto setmode;
} else {
- printk(KERN_ERR "uvesafb: mode switch failed (eax="
- "0x%x, err=%d)\n", task->t.regs.eax, err);
+ pr_err("mode switch failed (eax=0x%x, err=%d)\n",
+ task->t.regs.eax, err);
err = -EINVAL;
goto out;
}
@@ -1510,13 +1494,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
mode->bytes_per_scan_line;
if (par->ypan && info->var.yres_virtual > info->var.yres) {
- printk(KERN_INFO "uvesafb: scrolling: %s "
- "using protected mode interface, "
- "yres_virtual=%d\n",
+ pr_info("scrolling: %s using protected mode interface, yres_virtual=%d\n",
(par->ypan > 1) ? "ywrap" : "ypan",
info->var.yres_virtual);
} else {
- printk(KERN_INFO "uvesafb: scrolling: redraw\n");
+ pr_info("scrolling: redraw\n");
info->var.yres_virtual = info->var.yres;
par->ypan = 0;
}
@@ -1704,7 +1686,7 @@ static int uvesafb_probe(struct platform_device *dev)
err = uvesafb_vbe_init(info);
if (err) {
- printk(KERN_ERR "uvesafb: vbe_init() failed with %d\n", err);
+ pr_err("vbe_init() failed with %d\n", err);
goto out;
}
@@ -1726,15 +1708,15 @@ static int uvesafb_probe(struct platform_device *dev)
uvesafb_init_info(info, mode);
if (!request_region(0x3c0, 32, "uvesafb")) {
- printk(KERN_ERR "uvesafb: request region 0x3c0-0x3e0 failed\n");
+ pr_err("request region 0x3c0-0x3e0 failed\n");
err = -EIO;
goto out_mode;
}
if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
"uvesafb")) {
- printk(KERN_ERR "uvesafb: cannot reserve video memory at "
- "0x%lx\n", info->fix.smem_start);
+ pr_err("cannot reserve video memory at 0x%lx\n",
+ info->fix.smem_start);
err = -EIO;
goto out_reg;
}
@@ -1743,10 +1725,8 @@ static int uvesafb_probe(struct platform_device *dev)
uvesafb_ioremap(info);
if (!info->screen_base) {
- printk(KERN_ERR
- "uvesafb: abort, cannot ioremap 0x%x bytes of video "
- "memory at 0x%lx\n",
- info->fix.smem_len, info->fix.smem_start);
+ pr_err("abort, cannot ioremap 0x%x bytes of video memory at 0x%lx\n",
+ info->fix.smem_len, info->fix.smem_start);
err = -EIO;
goto out_mem;
}
@@ -1754,16 +1734,14 @@ static int uvesafb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
if (register_framebuffer(info) < 0) {
- printk(KERN_ERR
- "uvesafb: failed to register framebuffer device\n");
+ pr_err("failed to register framebuffer device\n");
err = -EINVAL;
goto out_unmap;
}
- printk(KERN_INFO "uvesafb: framebuffer at 0x%lx, mapped to 0x%p, "
- "using %dk, total %dk\n", info->fix.smem_start,
- info->screen_base, info->fix.smem_len/1024,
- par->vbe_ib.total_memory * 64);
+ pr_info("framebuffer at 0x%lx, mapped to 0x%p, using %dk, total %dk\n",
+ info->fix.smem_start, info->screen_base,
+ info->fix.smem_len / 1024, par->vbe_ib.total_memory * 64);
fb_info(info, "%s frame buffer device\n", info->fix.id);
err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
@@ -1871,8 +1849,7 @@ static int uvesafb_setup(char *options)
else if (this_opt[0] >= '0' && this_opt[0] <= '9') {
mode_option = this_opt;
} else {
- printk(KERN_WARNING
- "uvesafb: unrecognized option %s\n", this_opt);
+ pr_warn("unrecognized option %s\n", this_opt);
}
}
@@ -1931,8 +1908,7 @@ static int uvesafb_init(void)
err = driver_create_file(&uvesafb_driver.driver,
&driver_attr_v86d);
if (err) {
- printk(KERN_WARNING "uvesafb: failed to register "
- "attributes\n");
+ pr_warn("failed to register attributes\n");
err = 0;
}
}
diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c
index ebc6e6e0dd0f..ba105c876bed 100644
--- a/drivers/video/fbdev/vermilion/cr_pll.c
+++ b/drivers/video/fbdev/vermilion/cr_pll.c
@@ -185,6 +185,7 @@ static int __init cr_pll_init(void)
if (err) {
printk(KERN_ERR
"Carillo Ranch failed to initialize vml_sys.\n");
+ iounmap(mch_regs_base);
pci_dev_put(mch_dev);
return err;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d6950e0802b7..7bc88fd43cfc 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -134,11 +134,8 @@ struct vscsibk_pend {
struct page *pages[VSCSI_MAX_GRANTS];
struct se_cmd se_cmd;
-};
-struct scsiback_tmr {
- atomic_t tmr_complete;
- wait_queue_head_t tmr_wait;
+ struct completion tmr_done;
};
#define VSCSI_DEFAULT_SESSION_TAGS 128
@@ -599,36 +596,28 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_cmd *se_cmd = &pending_req->se_cmd;
- struct scsiback_tmr *tmr;
u64 unpacked_lun = pending_req->v2p->lun;
int rc, err = FAILED;
- tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
- if (!tmr) {
- target_put_sess_cmd(se_cmd);
- goto err;
- }
-
- init_waitqueue_head(&tmr->tmr_wait);
+ init_completion(&pending_req->tmr_done);
rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
&pending_req->sense_buffer[0],
- unpacked_lun, tmr, act, GFP_KERNEL,
+ unpacked_lun, NULL, act, GFP_KERNEL,
tag, TARGET_SCF_ACK_KREF);
if (rc)
goto err;
- wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
+ wait_for_completion(&pending_req->tmr_done);
err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
SUCCESS : FAILED;
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
- transport_generic_free_cmd(&pending_req->se_cmd, 1);
+ transport_generic_free_cmd(&pending_req->se_cmd, 0);
return;
+
err:
- if (tmr)
- kfree(tmr);
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}
@@ -1389,12 +1378,6 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
struct se_session *se_sess = se_cmd->se_sess;
- struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-
- if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
- struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
- kfree(tmr);
- }
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
@@ -1455,11 +1438,10 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
{
- struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+ struct vscsibk_pend *pending_req = container_of(se_cmd,
+ struct vscsibk_pend, se_cmd);
- atomic_set(&tmr->tmr_complete, 1);
- wake_up(&tmr->tmr_wait);
+ complete(&pending_req->tmr_done);
}
static void scsiback_aborted_task(struct se_cmd *se_cmd)
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index c202930086ed..8fb89ddc6cc7 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -33,6 +33,7 @@
#include <linux/parser.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
@@ -82,6 +83,13 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
+static const char *const v9fs_cache_modes[nr__p9_cache_modes] = {
+ [CACHE_NONE] = "none",
+ [CACHE_MMAP] = "mmap",
+ [CACHE_LOOSE] = "loose",
+ [CACHE_FSCACHE] = "fscache",
+};
+
/* Interpret mount options for cache mode */
static int get_cache_mode(char *s)
{
@@ -104,6 +112,58 @@ static int get_cache_mode(char *s)
return version;
}
+/*
+ * Display the mount options in /proc/mounts.
+ */
+int v9fs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct v9fs_session_info *v9ses = root->d_sb->s_fs_info;
+
+ if (v9ses->debug)
+ seq_printf(m, ",debug=%x", v9ses->debug);
+ if (!uid_eq(v9ses->dfltuid, V9FS_DEFUID))
+ seq_printf(m, ",dfltuid=%u",
+ from_kuid_munged(&init_user_ns, v9ses->dfltuid));
+ if (!gid_eq(v9ses->dfltgid, V9FS_DEFGID))
+ seq_printf(m, ",dfltgid=%u",
+ from_kgid_munged(&init_user_ns, v9ses->dfltgid));
+ if (v9ses->afid != ~0)
+ seq_printf(m, ",afid=%u", v9ses->afid);
+ if (strcmp(v9ses->uname, V9FS_DEFUSER) != 0)
+ seq_printf(m, ",uname=%s", v9ses->uname);
+ if (strcmp(v9ses->aname, V9FS_DEFANAME) != 0)
+ seq_printf(m, ",aname=%s", v9ses->aname);
+ if (v9ses->nodev)
+ seq_puts(m, ",nodevmap");
+ if (v9ses->cache)
+ seq_printf(m, ",%s", v9fs_cache_modes[v9ses->cache]);
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cachetag && v9ses->cache == CACHE_FSCACHE)
+ seq_printf(m, ",cachetag=%s", v9ses->cachetag);
+#endif
+
+ switch (v9ses->flags & V9FS_ACCESS_MASK) {
+ case V9FS_ACCESS_USER:
+ seq_puts(m, ",access=user");
+ break;
+ case V9FS_ACCESS_ANY:
+ seq_puts(m, ",access=any");
+ break;
+ case V9FS_ACCESS_CLIENT:
+ seq_puts(m, ",access=client");
+ break;
+ case V9FS_ACCESS_SINGLE:
+ seq_printf(m, ",access=%u",
+ from_kuid_munged(&init_user_ns, v9ses->uid));
+ break;
+ }
+
+ if (v9ses->flags & V9FS_POSIX_ACL)
+ seq_puts(m, ",posixacl");
+
+ return p9_show_client_options(m, v9ses->clnt);
+}
+
/**
* v9fs_parse_options - parse mount options into session structure
* @v9ses: existing v9fs session information
@@ -230,6 +290,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
break;
case Opt_cachetag:
#ifdef CONFIG_9P_FSCACHE
+ kfree(v9ses->cachetag);
v9ses->cachetag = match_strdup(&args[0]);
#endif
break;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 76eaf49abd3a..982e017acadb 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -67,6 +67,7 @@ enum p9_cache_modes {
CACHE_MMAP,
CACHE_LOOSE,
CACHE_FSCACHE,
+ nr__p9_cache_modes
};
/**
@@ -137,6 +138,8 @@ static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
return container_of(inode, struct v9fs_inode, vfs_inode);
}
+extern int v9fs_show_options(struct seq_file *m, struct dentry *root);
+
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
char *);
extern void v9fs_session_close(struct v9fs_session_info *v9ses);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index a0965fb587a5..8b75463cb211 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -33,7 +33,6 @@
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
-#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/idr.h>
#include <linux/sched.h>
@@ -104,7 +103,6 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_flags |= MS_POSIXACL;
#endif
- save_mount_options(sb, data);
return 0;
}
@@ -349,7 +347,7 @@ static const struct super_operations v9fs_super_ops = {
.destroy_inode = v9fs_destroy_inode,
.statfs = simple_statfs,
.evict_inode = v9fs_evict_inode,
- .show_options = generic_show_options,
+ .show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
.write_inode = v9fs_write_inode,
};
@@ -360,7 +358,7 @@ static const struct super_operations v9fs_super_ops_dotl = {
.statfs = v9fs_statfs,
.drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
- .show_options = generic_show_options,
+ .show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
.write_inode = v9fs_write_inode_dotl,
};
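
The 9p hunks above set the pattern for this whole series: save_mount_options() kept a verbatim copy of the mount string, which generic_show_options() replayed even after a remount had changed the real state, so each filesystem now reconstructs its options from live superblock fields. A minimal sketch of the resulting callback shape, assuming a hypothetical examplefs and sb_info layout (not part of the patch):

	#include <linux/fs.h>
	#include <linux/seq_file.h>
	#include <linux/uidgid.h>
	#include <linux/user_namespace.h>

	struct examplefs_sb_info {			/* hypothetical */
		unsigned int debug:1;
		kuid_t uid;
	};

	static int examplefs_show_options(struct seq_file *m, struct dentry *root)
	{
		struct examplefs_sb_info *sbi = root->d_sb->s_fs_info;

		/* emit only options that differ from the mount defaults */
		if (sbi->debug)
			seq_puts(m, ",debug");
		if (!uid_eq(sbi->uid, GLOBAL_ROOT_UID))
			seq_printf(m, ",uid=%u",
				   from_kuid_munged(&init_user_ns, sbi->uid));
		return 0;
	}

	static const struct super_operations examplefs_sops = {
		.show_options	= examplefs_show_options, /* was generic_show_options */
	};
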
diff --git a/fs/Kconfig b/fs/Kconfig
index b0e42b6a96b9..7aee6d699fd6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -80,7 +80,6 @@ config EXPORTFS_BLOCK_OPS
config FILE_LOCKING
bool "Enable POSIX file locking API" if EXPERT
default y
- select PERCPU_RWSEM
help
This option enables standard file locking support, required
for filesystems like NFS and for the flock() system
diff --git a/fs/affs/super.c b/fs/affs/super.c
index c2c27a8f128e..7bf47a41cb4f 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -20,9 +20,11 @@
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/seq_file.h>
#include "affs.h"
static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
+static int affs_show_options(struct seq_file *m, struct dentry *root);
static int affs_remount (struct super_block *sb, int *flags, char *data);
static void
@@ -159,7 +161,7 @@ static const struct super_operations affs_sops = {
.sync_fs = affs_sync_fs,
.statfs = affs_statfs,
.remount_fs = affs_remount,
- .show_options = generic_show_options,
+ .show_options = affs_show_options,
};
enum {
@@ -293,6 +295,40 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved,
return 1;
}
+static int affs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct affs_sb_info *sbi = AFFS_SB(sb);
+
+ if (sb->s_blocksize)
+ seq_printf(m, ",bs=%lu", sb->s_blocksize);
+ if (affs_test_opt(sbi->s_flags, SF_SETMODE))
+ seq_printf(m, ",mode=%o", sbi->s_mode);
+ if (affs_test_opt(sbi->s_flags, SF_MUFS))
+ seq_puts(m, ",mufs");
+ if (affs_test_opt(sbi->s_flags, SF_NO_TRUNCATE))
+ seq_puts(m, ",nofilenametruncate");
+ if (affs_test_opt(sbi->s_flags, SF_PREFIX))
+ seq_printf(m, ",prefix=%s", sbi->s_prefix);
+ if (affs_test_opt(sbi->s_flags, SF_IMMUTABLE))
+ seq_puts(m, ",protect");
+ if (sbi->s_reserved != 2)
+ seq_printf(m, ",reserved=%u", sbi->s_reserved);
+ if (sbi->s_root_block != (sbi->s_reserved + sbi->s_partition_size - 1) / 2)
+ seq_printf(m, ",root=%u", sbi->s_root_block);
+ if (affs_test_opt(sbi->s_flags, SF_SETGID))
+ seq_printf(m, ",setgid=%u",
+ from_kgid_munged(&init_user_ns, sbi->s_gid));
+ if (affs_test_opt(sbi->s_flags, SF_SETUID))
+ seq_printf(m, ",setuid=%u",
+ from_kuid_munged(&init_user_ns, sbi->s_uid));
+ if (affs_test_opt(sbi->s_flags, SF_VERBOSE))
+ seq_puts(m, ",verbose");
+ if (sbi->s_volume[0])
+ seq_printf(m, ",volume=%s", sbi->s_volume);
+ return 0;
+}
+
/* This function definitely needs to be split up. Some fine day I'll
* hopefully have the guts to do so. Until then: sorry for the mess.
*/
@@ -316,8 +352,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
u8 sig[4];
int ret;
- save_mount_options(sb, data);
-
pr_debug("read_super(%s)\n", data ? (const char *)data : "no options");
sb->s_magic = AFFS_SUPER_MAGIC;
@@ -548,8 +582,6 @@ affs_remount(struct super_block *sb, int *flags, char *data)
}
flush_delayed_work(&sbi->sb_work);
- if (new_opts)
- replace_mount_options(sb, new_opts);
sbi->s_flags = mount_flags;
sbi->s_mode = mode;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 67680c2d96cf..689173c0a682 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -37,6 +37,8 @@ static void afs_kill_super(struct super_block *sb);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
+static int afs_show_devname(struct seq_file *m, struct dentry *root);
+static int afs_show_options(struct seq_file *m, struct dentry *root);
struct file_system_type afs_fs_type = {
.owner = THIS_MODULE,
@@ -53,7 +55,8 @@ static const struct super_operations afs_super_ops = {
.drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
.evict_inode = afs_evict_inode,
- .show_options = generic_show_options,
+ .show_devname = afs_show_devname,
+ .show_options = afs_show_options,
};
static struct kmem_cache *afs_inode_cachep;
@@ -136,6 +139,45 @@ void __exit afs_fs_exit(void)
}
/*
+ * Display the mount device name in /proc/mounts.
+ */
+static int afs_show_devname(struct seq_file *m, struct dentry *root)
+{
+ struct afs_super_info *as = root->d_sb->s_fs_info;
+ struct afs_volume *volume = as->volume;
+ struct afs_cell *cell = volume->cell;
+ const char *suf = "";
+ char pref = '%';
+
+ switch (volume->type) {
+ case AFSVL_RWVOL:
+ break;
+ case AFSVL_ROVOL:
+ pref = '#';
+ if (volume->type_force)
+ suf = ".readonly";
+ break;
+ case AFSVL_BACKVOL:
+ pref = '#';
+ suf = ".backup";
+ break;
+ }
+
+ seq_printf(m, "%c%s:%s%s", pref, cell->name, volume->vlocation->vldb.name, suf);
+ return 0;
+}
+
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int afs_show_options(struct seq_file *m, struct dentry *root)
+{
+ if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags))
+ seq_puts(m, "autocell");
+ return 0;
+}
+
+/*
* parse the mount options
* - this function has been shamelessly adapted from the ext3 fs which
* shamelessly adapted it from the msdos fs
@@ -427,7 +469,6 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
deactivate_locked_super(sb);
goto error;
}
- save_mount_options(sb, new_opts);
sb->s_flags |= MS_ACTIVE;
} else {
_debug("reuse");
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index d509887c580c..1b7e0f7128d6 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -120,18 +120,15 @@ static int befs_compare_strings(const void *key1, int keylen1,
const void *key2, int keylen2);
/**
- * befs_bt_read_super - read in btree superblock convert to cpu byteorder
- * @sb: Filesystem superblock
- * @ds: Datastream to read from
- * @sup: Buffer in which to place the btree superblock
+ * befs_bt_read_super() - read in btree superblock and convert to cpu byteorder
+ * @sb: Filesystem superblock
+ * @ds: Datastream to read from
+ * @sup: Buffer in which to place the btree superblock
*
* Calls befs_read_datastream to read in the btree superblock and
* makes sure it is in cpu byteorder, byteswapping if necessary.
- *
- * On success, returns BEFS_OK and *@sup contains the btree superblock,
- * in cpu byte order.
- *
- * On failure, BEFS_ERR is returned.
+ * Return: BEFS_OK on success, with *@sup containing the btree superblock in
+ * cpu byte order; BEFS_ERR on failure.
*/
static int
befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds,
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 63e7c4760bfb..4a4a5a366158 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/exportfs.h>
+#include <linux/seq_file.h>
#include "befs.h"
#include "btree.h"
@@ -53,6 +54,7 @@ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
static void befs_put_super(struct super_block *);
static int befs_remount(struct super_block *, int *, char *);
static int befs_statfs(struct dentry *, struct kstatfs *);
+static int befs_show_options(struct seq_file *, struct dentry *);
static int parse_options(char *, struct befs_mount_options *);
static struct dentry *befs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type);
@@ -66,7 +68,7 @@ static const struct super_operations befs_sops = {
.put_super = befs_put_super, /* uninit super */
.statfs = befs_statfs, /* statfs */
.remount_fs = befs_remount,
- .show_options = generic_show_options,
+ .show_options = befs_show_options,
};
/* slab cache for befs_inode_info objects */
@@ -771,6 +773,24 @@ parse_options(char *options, struct befs_mount_options *opts)
return 1;
}
+static int befs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct befs_sb_info *befs_sb = BEFS_SB(root->d_sb);
+ struct befs_mount_options *opts = &befs_sb->mount_opts;
+
+ if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, opts->uid));
+ if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, opts->gid));
+ if (opts->iocharset)
+ seq_printf(m, ",charset=%s", opts->iocharset);
+ if (opts->debug)
+ seq_puts(m, ",debug");
+ return 0;
+}
+
/* This function has the responsibility of getting the
* filesystem ready for unmounting.
* Basically, we free everything that we allocated in
@@ -804,8 +824,6 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
const off_t x86_sb_off = 512;
int blocksize;
- save_mount_options(sb, data);
-
sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL);
if (sb->s_fs_info == NULL)
goto unacquire_none;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 25e312cb6071..9a69392f1fb3 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -419,7 +419,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
if (i_sblock > info->si_blocks ||
i_eblock > info->si_blocks ||
i_sblock > i_eblock ||
- i_eoff > s_size ||
+ (i_eoff != le32_to_cpu(-1) && i_eoff > s_size) ||
i_sblock * BFS_BSIZE > i_eoff) {
printf("Inode 0x%08x corrupted\n", i);
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 2edcefc0a294..a1e6860b6f46 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -422,9 +422,9 @@ static int load_flat_file(struct linux_binprm *bprm,
{
struct flat_hdr *hdr;
unsigned long textpos, datapos, realdatastart;
- unsigned long text_len, data_len, bss_len, stack_len, full_data, flags;
+ u32 text_len, data_len, bss_len, stack_len, full_data, flags;
unsigned long len, memp, memp_size, extra, rlim;
- unsigned long __user *reloc, *rp;
+ u32 __user *reloc, *rp;
struct inode *inode;
int i, rev, relocs;
loff_t fpos;
@@ -574,7 +574,7 @@ static int load_flat_file(struct linux_binprm *bprm,
MAX_SHARED_LIBS * sizeof(unsigned long),
FLAT_DATA_ALIGN);
- pr_debug("Allocated data+bss+stack (%ld bytes): %lx\n",
+ pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
data_len + bss_len + stack_len, datapos);
fpos = ntohl(hdr->data_start);
@@ -596,13 +596,13 @@ static int load_flat_file(struct linux_binprm *bprm,
goto err;
}
- reloc = (unsigned long __user *)
+ reloc = (u32 __user *)
(datapos + (ntohl(hdr->reloc_start) - text_len));
memp = realdatastart;
memp_size = len;
} else {
- len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+ len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32);
len = PAGE_ALIGN(len);
textpos = vm_mmap(NULL, 0, len,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
@@ -618,10 +618,10 @@ static int load_flat_file(struct linux_binprm *bprm,
realdatastart = textpos + ntohl(hdr->data_start);
datapos = ALIGN(realdatastart +
- MAX_SHARED_LIBS * sizeof(unsigned long),
+ MAX_SHARED_LIBS * sizeof(u32),
FLAT_DATA_ALIGN);
- reloc = (unsigned long __user *)
+ reloc = (u32 __user *)
(datapos + (ntohl(hdr->reloc_start) - text_len));
memp = textpos;
memp_size = len;
@@ -694,7 +694,7 @@ static int load_flat_file(struct linux_binprm *bprm,
ret = result;
pr_err("Unable to read code+data+bss, errno %d\n", ret);
vm_munmap(textpos, text_len + data_len + extra +
- MAX_SHARED_LIBS * sizeof(unsigned long));
+ MAX_SHARED_LIBS * sizeof(u32));
goto err;
}
}
@@ -754,8 +754,8 @@ static int load_flat_file(struct linux_binprm *bprm,
* image.
*/
if (flags & FLAT_FLAG_GOTPIC) {
- for (rp = (unsigned long __user *)datapos; ; rp++) {
- unsigned long addr, rp_val;
+ for (rp = (u32 __user *)datapos; ; rp++) {
+ u32 addr, rp_val;
if (get_user(rp_val, rp))
return -EFAULT;
if (rp_val == 0xffffffff)
@@ -784,9 +784,9 @@ static int load_flat_file(struct linux_binprm *bprm,
* __start to address 4 so that is okay).
*/
if (rev > OLD_FLAT_VERSION) {
- unsigned long __maybe_unused persistent = 0;
+ u32 __maybe_unused persistent = 0;
for (i = 0; i < relocs; i++) {
- unsigned long addr, relval;
+ u32 addr, relval;
/*
* Get the address of the pointer to be
@@ -799,15 +799,18 @@ static int load_flat_file(struct linux_binprm *bprm,
if (flat_set_persistent(relval, &persistent))
continue;
addr = flat_get_relocate_addr(relval);
- rp = (unsigned long __user *)calc_reloc(addr, libinfo, id, 1);
- if (rp == (unsigned long __user *)RELOC_FAILED) {
+ rp = (u32 __user *)calc_reloc(addr, libinfo, id, 1);
+ if (rp == (u32 __user *)RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
}
/* Get the pointer's value. */
- addr = flat_get_addr_from_rp(rp, relval, flags,
- &persistent);
+ ret = flat_get_addr_from_rp(rp, relval, flags,
+ &addr, &persistent);
+ if (unlikely(ret))
+ goto err;
+
if (addr != 0) {
/*
* Do the relocation. PIC relocs in the data section are
@@ -822,12 +825,14 @@ static int load_flat_file(struct linux_binprm *bprm,
}
/* Write back the relocated pointer. */
- flat_put_addr_at_rp(rp, addr, relval);
+ ret = flat_put_addr_at_rp(rp, addr, relval);
+ if (unlikely(ret))
+ goto err;
}
}
} else {
for (i = 0; i < relocs; i++) {
- unsigned long relval;
+ u32 relval;
if (get_user(relval, reloc + i))
return -EFAULT;
relval = ntohl(relval);
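
The binfmt_flat type changes above matter on 64-bit kernels: bFLT headers and relocation tables store 32-bit big-endian words, but unsigned long is 8 bytes there, so a get_user() through an unsigned long pointer would transfer the wrong width and walk off the table. A hedged sketch of reading one relocation entry, with hypothetical names:

	#include <linux/types.h>
	#include <linux/uaccess.h>
	#include <asm/byteorder.h>

	static int read_one_reloc(u32 __user *reloc_table, int i, u32 *out)
	{
		u32 relval;

		/* get_user() transfers sizeof(*ptr) bytes: 4 here, never 8 */
		if (get_user(relval, reloc_table + i))
			return -EFAULT;
		*out = ntohl(relval);	/* bFLT stores the table big-endian */
		return 0;
	}
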
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 2c0b7b57fcd5..d2ef9ac2a630 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -152,6 +152,7 @@ csum_failed:
* we have verified the checksum already, set page
* checked so the end_io handlers know about it
*/
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, cb->orig_bio, i)
SetPageChecked(bvec->bv_page);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 086dcbadce09..080e2ebb8aa0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -964,6 +964,7 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
struct btrfs_root *root;
int i, ret = 0;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, i) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 556484cf5d93..0aff9b278c19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2258,7 +2258,7 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
return 0;
}
-int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec, int failed_mirror)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2274,7 +2274,7 @@ int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
btrfs_debug(fs_info,
"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
num_copies, failrec->this_mirror, failed_mirror);
- return 0;
+ return false;
}
/*
@@ -2315,10 +2315,10 @@ int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
btrfs_debug(fs_info,
"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
num_copies, failrec->this_mirror, failed_mirror);
- return 0;
+ return false;
}
- return 1;
+ return true;
}
@@ -2382,8 +2382,8 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
if (ret)
return ret;
- ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
- if (!ret) {
+ if (!btrfs_check_repairable(inode, failed_bio, failrec,
+ failed_mirror)) {
free_io_failure(failure_tree, tree, failrec);
return -EIO;
}
@@ -2396,10 +2396,6 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
start - page_offset(page),
(int)phy_offset, failed_bio->bi_end_io,
NULL);
- if (!bio) {
- free_io_failure(failure_tree, tree, failrec);
- return -EIO;
- }
bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
btrfs_debug(btrfs_sb(inode->i_sb),
@@ -2456,6 +2452,7 @@ static void end_bio_extent_writepage(struct bio *bio)
u64 end;
int i;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
@@ -2526,6 +2523,7 @@ static void end_bio_extent_readpage(struct bio *bio)
int ret;
int i;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
@@ -3680,6 +3678,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
struct extent_buffer *eb;
int i, done;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 3fb8513bf02e..4f030912f3ef 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -539,8 +539,8 @@ void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
struct io_failure_record **failrec_ret);
-int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
- struct io_failure_record *failrec, int fail_mirror);
+bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+ struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec,
struct page *page, int pg_offset, int icsum,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 06dea7c89bbd..95c212037095 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8016,10 +8016,6 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
isector >>= inode->i_sb->s_blocksize_bits;
bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
pgoff, isector, repair_endio, repair_arg);
- if (!bio) {
- free_io_failure(failure_tree, io_tree, failrec);
- return -EIO;
- }
bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
btrfs_debug(BTRFS_I(inode)->root->fs_info,
@@ -8059,6 +8055,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
done->uptodate = 1;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, i)
clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
io_tree, done->start, bvec->bv_page,
@@ -8150,6 +8147,7 @@ static void btrfs_retry_endio(struct bio *bio)
io_tree = &BTRFS_I(inode)->io_tree;
failure_tree = &BTRFS_I(inode)->io_failure_tree;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, i) {
ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
bvec->bv_offset, done->start,
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6f845d219cd6..208638384cd2 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1136,20 +1136,27 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
struct bio *bio;
- struct bio_vec *bvec;
u64 start;
unsigned long stripe_offset;
unsigned long page_index;
- int i;
spin_lock_irq(&rbio->bio_list_lock);
bio_list_for_each(bio, &rbio->bio_list) {
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ int i = 0;
+
start = (u64)bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_SHIFT;
- bio_for_each_segment_all(bvec, bio, i)
- rbio->bio_pages[page_index + i] = bvec->bv_page;
+ if (bio_flagged(bio, BIO_CLONED))
+ bio->bi_iter = btrfs_io_bio(bio)->iter;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ rbio->bio_pages[page_index + i] = bvec.bv_page;
+ i++;
+ }
}
spin_unlock_irq(&rbio->bio_list_lock);
}
@@ -1423,11 +1430,14 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
*/
static void set_bio_pages_uptodate(struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ if (bio_flagged(bio, BIO_CLONED))
+ bio->bi_iter = btrfs_io_bio(bio)->iter;
- bio_for_each_segment_all(bvec, bio, i)
- SetPageUptodate(bvec->bv_page);
+ bio_for_each_segment(bvec, bio, iter)
+ SetPageUptodate(bvec.bv_page);
}
/*
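
Every ASSERT(!bio_flagged(bio, BIO_CLONED)) added above guards the same invariant: bio_for_each_segment_all() walks the raw bi_io_vec array, which a cloned bio only borrows from its parent, while bio_for_each_segment() advances a private bvec_iter and is clone-safe, which is why the raid56 helpers switch to it. A hedged side-by-side sketch (WARN_ON_ONCE stands in for btrfs's ASSERT):

	#include <linux/bio.h>
	#include <linux/mm.h>

	static void mark_uptodate_clone_safe(struct bio *bio)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;

		/* honours bio->bi_iter, so clones sharing bi_io_vec are fine */
		bio_for_each_segment(bvec, bio, iter)
			SetPageUptodate(bvec.bv_page);
	}

	static void mark_uptodate_owner_only(struct bio *bio)
	{
		struct bio_vec *bvec;
		int i;

		/* walks bi_io_vec directly: only valid for bios that own it */
		WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, bio, i)
			SetPageUptodate(bvec->bv_page);
	}
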
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index e937c10b8287..b082210df9c8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1856,7 +1856,7 @@ out:
*/
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
const char *name, int name_len,
- u64 *who_ino, u64 *who_gen)
+ u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
int ret = 0;
u64 gen;
@@ -1905,7 +1905,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
if (other_inode > sctx->send_progress ||
is_waiting_for_move(sctx, other_inode)) {
ret = get_inode_info(sctx->parent_root, other_inode, NULL,
- who_gen, NULL, NULL, NULL, NULL);
+ who_gen, who_mode, NULL, NULL, NULL);
if (ret < 0)
goto out;
@@ -3683,6 +3683,36 @@ out:
return ret;
}
+static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
+{
+ int ret;
+ struct fs_path *new_path;
+
+ /*
+ * Our reference's name member points into its full_path member string,
+ * so we use a new path here.
+ */
+ new_path = fs_path_alloc();
+ if (!new_path)
+ return -ENOMEM;
+
+ ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
+ if (ret < 0) {
+ fs_path_free(new_path);
+ return ret;
+ }
+ ret = fs_path_add(new_path, ref->name, ref->name_len);
+ if (ret < 0) {
+ fs_path_free(new_path);
+ return ret;
+ }
+
+ fs_path_free(ref->full_path);
+ set_ref_path(ref, new_path);
+
+ return 0;
+}
+
/*
* This does all the move/link/unlink/rmdir magic.
*/
@@ -3696,10 +3726,12 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
struct fs_path *valid_path = NULL;
u64 ow_inode = 0;
u64 ow_gen;
+ u64 ow_mode;
int did_overwrite = 0;
int is_orphan = 0;
u64 last_dir_ino_rm = 0;
bool can_rename = true;
+ bool orphanized_dir = false;
bool orphanized_ancestor = false;
btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
@@ -3798,7 +3830,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
*/
ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
cur->name, cur->name_len,
- &ow_inode, &ow_gen);
+ &ow_inode, &ow_gen, &ow_mode);
if (ret < 0)
goto out;
if (ret) {
@@ -3815,6 +3847,8 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
cur->full_path);
if (ret < 0)
goto out;
+ if (S_ISDIR(ow_mode))
+ orphanized_dir = true;
/*
* If ow_inode has its rename operation delayed
@@ -3920,6 +3954,18 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (ret < 0)
goto out;
} else {
+ /*
+ * We might have previously orphanized an inode
+ * which is an ancestor of our current inode,
+ * so our reference's full path, which was
+ * computed before any such orphanizations, must
+ * be updated.
+ */
+ if (orphanized_dir) {
+ ret = update_ref_path(sctx, cur);
+ if (ret < 0)
+ goto out;
+ }
ret = send_link(sctx, cur->full_path,
valid_path);
if (ret < 0)
@@ -3990,34 +4036,9 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* ancestor inode.
*/
if (orphanized_ancestor) {
- struct fs_path *new_path;
-
- /*
- * Our reference's name member points to
- * its full_path member string, so we
- * use here a new path.
- */
- new_path = fs_path_alloc();
- if (!new_path) {
- ret = -ENOMEM;
- goto out;
- }
- ret = get_cur_path(sctx, cur->dir,
- cur->dir_gen,
- new_path);
- if (ret < 0) {
- fs_path_free(new_path);
- goto out;
- }
- ret = fs_path_add(new_path,
- cur->name,
- cur->name_len);
- if (ret < 0) {
- fs_path_free(new_path);
+ ret = update_ref_path(sctx, cur);
+ if (ret < 0)
goto out;
- }
- fs_path_free(cur->full_path);
- set_ref_path(cur, new_path);
}
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
@@ -5249,15 +5270,12 @@ static int is_extent_unchanged(struct send_ctx *sctx,
goto out;
}
- right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
if (right_type == BTRFS_FILE_EXTENT_INLINE) {
right_len = btrfs_file_extent_inline_len(eb, slot, ei);
right_len = PAGE_ALIGN(right_len);
} else {
right_len = btrfs_file_extent_num_bytes(eb, ei);
}
- right_offset = btrfs_file_extent_offset(eb, ei);
- right_gen = btrfs_file_extent_generation(eb, ei);
/*
* Are we at extent 8? If yes, we know the extent is changed.
@@ -5282,6 +5300,10 @@ static int is_extent_unchanged(struct send_ctx *sctx,
goto out;
}
+ right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+ right_offset = btrfs_file_extent_offset(eb, ei);
+ right_gen = btrfs_file_extent_generation(eb, ei);
+
left_offset_fixed = left_offset;
if (key.offset < ekey->offset) {
/* Fix the right offset for 2a and 7. */
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 74e47794e63f..12540b6104b5 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1154,7 +1154,6 @@ static int btrfs_fill_super(struct super_block *sb,
goto fail_close;
}
- save_mount_options(sb, data);
cleancache_init_fs(sb);
sb->s_flags |= MS_ACTIVE;
return 0;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index e071d23f6148..ef7240ace576 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -271,6 +271,11 @@ out:
if (ret < 0)
err = ret;
dput(last);
+ /* last_name no longer matches the cache index */
+ if (fi->readdir_cache_idx >= 0) {
+ fi->readdir_cache_idx = -1;
+ fi->dir_release_count = 0;
+ }
}
return err;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 556f480c6936..180b3356ff86 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1354,7 +1354,7 @@ init_cifs(void)
spin_lock_init(&cifs_tcp_ses_lock);
spin_lock_init(&GlobalMid_Lock);
- get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+ cifs_lock_secret = get_random_u32();
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
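
The cifs change above is a small simplification: for a single 32-bit secret, get_random_u32() draws from a per-CPU batch instead of running the full extraction path behind get_random_bytes(). Both forms below fill the same hypothetical secret:

	#include <linux/random.h>

	static u32 example_secret;	/* hypothetical */

	static void example_init_secret(void)
	{
		get_random_bytes(&example_secret, sizeof(example_secret)); /* before */
		example_secret = get_random_u32();			   /* after */
	}
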
diff --git a/fs/dcache.c b/fs/dcache.c
index 6c30be668487..f90141387f01 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -90,6 +90,11 @@ EXPORT_SYMBOL(rename_lock);
static struct kmem_cache *dentry_cache __read_mostly;
+const struct qstr empty_name = QSTR_INIT("", 0);
+EXPORT_SYMBOL(empty_name);
+const struct qstr slash_name = QSTR_INIT("/", 1);
+EXPORT_SYMBOL(slash_name);
+
/*
* This is the single most critical data structure when it comes
* to the dcache: the hashtable for lookups. Somebody should try
@@ -1606,8 +1611,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
*/
dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
if (unlikely(!name)) {
- static const struct qstr anon = QSTR_INIT("/", 1);
- name = &anon;
+ name = &slash_name;
dname = dentry->d_iname;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
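
Exporting empty_name and slash_name lets filesystems drop private copies of these two common qstrs; the gfs2 hunk further down makes exactly that conversion. A minimal before/after sketch (the consumer is hypothetical):

	#include <linux/dcache.h>
	#include <linux/printk.h>

	static void example_consumer(void)	/* hypothetical */
	{
		/* before: each filesystem rolled its own */
		static const struct qstr local_empty = QSTR_INIT("", 0);
		/* after: one shared, exported instance */
		const struct qstr *name = &empty_name;

		pr_debug("lens: %u %u\n", local_empty.len, name->len);
	}
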
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index a0e4e2f7e0be..c59f015f386e 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -203,8 +203,6 @@ static int debug_fill_super(struct super_block *sb, void *data, int silent)
struct debugfs_fs_info *fsi;
int err;
- save_mount_options(sb, data);
-
fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
sb->s_fs_info = fsi;
if (!fsi) {
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index d7a7c53803c1..5b68e4294faa 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -29,7 +29,6 @@ static const struct super_operations efivarfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.evict_inode = efivarfs_evict_inode,
- .show_options = generic_show_options,
};
static struct super_block *efivarfs_sb;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a6d194831ed8..e767e4389cb1 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -960,10 +960,14 @@ static void ep_show_fdinfo(struct seq_file *m, struct file *f)
mutex_lock(&ep->mtx);
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
+ struct inode *inode = file_inode(epi->ffd.file);
- seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
+ seq_printf(m, "tfd: %8d events: %8x data: %16llx "
+ " pos:%lli ino:%lx sdev:%x\n",
epi->ffd.fd, epi->event.events,
- (long long)epi->event.data);
+ (long long)epi->event.data,
+ (long long)epi->ffd.file->f_pos,
+ inode->i_ino, inode->i_sb->s_dev);
if (seq_has_overflowed(m))
break;
}
@@ -1073,6 +1077,50 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
return epir;
}
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
+{
+ struct rb_node *rbp;
+ struct epitem *epi;
+
+ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (epi->ffd.fd == tfd) {
+ if (toff == 0)
+ return epi;
+ else
+ toff--;
+ }
+ cond_resched();
+ }
+
+ return NULL;
+}
+
+struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
+ unsigned long toff)
+{
+ struct file *file_raw;
+ struct eventpoll *ep;
+ struct epitem *epi;
+
+ if (!is_file_epoll(file))
+ return ERR_PTR(-EINVAL);
+
+ ep = file->private_data;
+
+ mutex_lock(&ep->mtx);
+ epi = ep_find_tfd(ep, tfd, toff);
+ if (epi)
+ file_raw = epi->ffd.file;
+ else
+ file_raw = ERR_PTR(-ENOENT);
+ mutex_unlock(&ep->mtx);
+
+ return file_raw;
+}
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 79dafa71effd..51f0aea70cb4 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -175,11 +175,8 @@ ext2_get_acl(struct inode *inode, int type)
return acl;
}
-/*
- * inode->i_mutex: down
- */
-int
-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int
+__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int name_index;
void *value = NULL;
@@ -189,13 +186,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
switch(type) {
case ACL_TYPE_ACCESS:
name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (error)
- return error;
- inode->i_ctime = current_time(inode);
- mark_inode_dirty(inode);
- }
break;
case ACL_TYPE_DEFAULT:
@@ -222,6 +212,31 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
}
/*
+ * inode->i_mutex: down
+ */
+int
+ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int error;
+ int update_mode = 0;
+ umode_t mode = inode->i_mode;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error)
+ return error;
+ update_mode = 1;
+ }
+ error = __ext2_set_acl(inode, acl, type);
+ if (!error && update_mode) {
+ inode->i_mode = mode;
+ inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+ }
+ return error;
+}
+
+/*
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
*
* dir->i_mutex: down
@@ -238,12 +253,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
return error;
if (default_acl) {
- error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
}
if (acl) {
if (!error)
- error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
}
return error;
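
The ext2 reshuffle above (mirrored for hfsplus below) separates two jobs that were tangled in one function: the raw xattr write, and the posix_acl_update_mode() fixup that should apply only when userspace sets ACL_TYPE_ACCESS. ext2_init_acl() now calls the double-underscore helper directly, so inode creation can no longer clobber a freshly computed i_mode. A hedged sketch of the pattern, with hypothetical examplefs names:

	#include <linux/fs.h>
	#include <linux/posix_acl.h>

	static int __examplefs_set_acl(struct inode *inode,
				       struct posix_acl *acl, int type)
	{
		/* write the ACL xattr only; no i_mode side effects (stub) */
		return 0;
	}

	int examplefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
	{
		umode_t mode = inode->i_mode;
		int update_mode = 0, error;

		if (type == ACL_TYPE_ACCESS && acl) {
			error = posix_acl_update_mode(inode, &mode, &acl);
			if (error)
				return error;
			update_mode = 1;
		}
		error = __examplefs_set_acl(inode, acl, type);
		if (!error && update_mode) {
			inode->i_mode = mode;
			inode->i_ctime = current_time(inode);
			mark_inode_dirty(inode);
		}
		return error;
	}
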
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 2dcbd5698884..30163d007b2f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -659,6 +659,7 @@ static int ext2_get_blocks(struct inode *inode,
*/
err = -EAGAIN;
count = 0;
+ partial = chain + depth - 1;
break;
}
blk = le32_to_cpu(*(chain[depth-1].p + count));
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index a140c5e3dc54..b4b8438c42ef 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -211,7 +211,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
+ if (acl && !ipage) {
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 56bbf592e487..5b876f6d3f6b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -879,6 +879,7 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
struct inode *inode;
struct f2fs_inode_info *fi;
bool is_dir = (type == DIR_INODE);
+ unsigned long ino = 0;
trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
get_pages(sbi, is_dir ?
@@ -901,8 +902,17 @@ retry:
inode = igrab(&fi->vfs_inode);
spin_unlock(&sbi->inode_lock[type]);
if (inode) {
+ unsigned long cur_ino = inode->i_ino;
+
filemap_fdatawrite(inode->i_mapping);
iput(inode);
+ /* We need to give the CPU to other writers. */
+ if (ino == cur_ino) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ } else {
+ ino = cur_ino;
+ }
} else {
/*
* We should submit bio, since it exists several
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index a0e6d2c65a9e..2706130c261b 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1538,7 +1538,6 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode)) {
- inode_unlock(inode);
ret = -EPERM;
goto unlock_out;
}
@@ -1549,9 +1548,8 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
- inode_unlock(inode);
ret = -EPERM;
- goto out;
+ goto unlock_out;
}
}
@@ -1564,7 +1562,6 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
f2fs_mark_inode_dirty_sync(inode, false);
unlock_out:
inode_unlock(inode);
-out:
mnt_drop_write_file(filp);
return ret;
}
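
The f2fs_ioc_setflags hunks above remove a double unlock: the error paths used to call inode_unlock() themselves and then reach code that unlocked again. The restored convention, take the lock once and leave through a single label that drops it, sketched with a hypothetical ioctl body:

	#include <linux/fs.h>

	static long example_ioc_setflags(struct inode *inode)	/* hypothetical */
	{
		int ret = 0;

		inode_lock(inode);
		if (IS_NOQUOTA(inode)) {
			ret = -EPERM;
			goto unlock_out;	/* no inode_unlock() here */
		}
		/* ... validate and apply the new flags ... */
	unlock_out:
		inode_unlock(inode);		/* the one and only unlock */
		return ret;
	}
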
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 9adc202fcd6f..71191d89917d 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -11,6 +11,7 @@
*/
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
+#include <linux/seq_file.h>
#include "f2fs.h"
#include "segment.h"
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 8b99955e3504..a920ad2629ac 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -33,9 +33,10 @@ static struct file_system_type *file_systems;
static DEFINE_RWLOCK(file_systems_lock);
/* WARNING: This can be used only if we _already_ own a reference */
-void get_filesystem(struct file_system_type *fs)
+struct file_system_type *get_filesystem(struct file_system_type *fs)
{
__module_get(fs->owner);
+ return fs;
}
void put_filesystem(struct file_system_type *fs)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8b426f83909f..245c430a2e41 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
struct page *page = radix_tree_deref_slot_protected(slot,
&mapping->tree_lock);
if (likely(page) && PageDirty(page)) {
- __dec_wb_stat(old_wb, WB_RECLAIMABLE);
- __inc_wb_stat(new_wb, WB_RECLAIMABLE);
+ dec_wb_stat(old_wb, WB_RECLAIMABLE);
+ inc_wb_stat(new_wb, WB_RECLAIMABLE);
}
}
@@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
&mapping->tree_lock);
if (likely(page)) {
WARN_ON_ONCE(!PageWriteback(page));
- __dec_wb_stat(old_wb, WB_WRITEBACK);
- __inc_wb_stat(new_wb, WB_WRITEBACK);
+ dec_wb_stat(old_wb, WB_WRITEBACK);
+ inc_wb_stat(new_wb, WB_WRITEBACK);
}
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index db427658ccd9..5ee2e2f8576c 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -872,7 +872,6 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct buffer_head *bh;
struct gfs2_leaf *leaf;
struct gfs2_dirent *dent;
- struct qstr name = { .name = "" };
struct timespec tv = current_time(inode);
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
@@ -896,7 +895,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
leaf->lf_sec = cpu_to_be64(tv.tv_sec);
memset(leaf->lf_reserved2, 0, sizeof(leaf->lf_reserved2));
dent = (struct gfs2_dirent *)(leaf+1);
- gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
+ gfs2_qstr2dirent(&empty_name, bh->b_size - sizeof(struct gfs2_leaf), dent);
*pbh = bh;
return leaf;
}
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
index 9b92058a1240..6bb5d7c42888 100644
--- a/fs/hfsplus/posix_acl.c
+++ b/fs/hfsplus/posix_acl.c
@@ -51,8 +51,8 @@ struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type)
return acl;
}
-int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
- int type)
+static int __hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
+ int type)
{
int err;
char *xattr_name;
@@ -64,12 +64,6 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
switch (type) {
case ACL_TYPE_ACCESS:
xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (err)
- return err;
- }
- err = 0;
break;
case ACL_TYPE_DEFAULT:
@@ -105,6 +99,18 @@ end_set_acl:
return err;
}
+int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int err;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (err)
+ return err;
+ }
+ return __hfsplus_set_posix_acl(inode, acl, type);
+}
+
int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
{
int err = 0;
@@ -122,15 +128,15 @@ int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
return err;
if (default_acl) {
- err = hfsplus_set_posix_acl(inode, default_acl,
- ACL_TYPE_DEFAULT);
+ err = __hfsplus_set_posix_acl(inode, default_acl,
+ ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
}
if (acl) {
if (!err)
- err = hfsplus_set_posix_acl(inode, acl,
- ACL_TYPE_ACCESS);
+ err = __hfsplus_set_posix_acl(inode, acl,
+ ACL_TYPE_ACCESS);
posix_acl_release(acl);
}
return err;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 52388611635e..28d2753be094 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -46,13 +46,13 @@ static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
- kuid_t uid;
- kgid_t gid;
- umode_t mode;
- long max_hpages;
- long nr_inodes;
- struct hstate *hstate;
- long min_hpages;
+ struct hstate *hstate;
+ long max_hpages;
+ long nr_inodes;
+ long min_hpages;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
};
struct hugetlbfs_inode_info {
@@ -861,6 +861,46 @@ static int hugetlbfs_error_remove_page(struct address_space *mapping,
return 0;
}
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
+ struct hugepage_subpool *spool = sbinfo->spool;
+ unsigned long hpage_size = huge_page_size(sbinfo->hstate);
+ unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
+ char mod;
+
+ if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, sbinfo->uid));
+ if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, sbinfo->gid));
+ if (sbinfo->mode != 0755)
+ seq_printf(m, ",mode=%o", sbinfo->mode);
+ if (sbinfo->max_inodes != -1)
+ seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
+
+ hpage_size /= 1024;
+ mod = 'K';
+ if (hpage_size >= 1024) {
+ hpage_size /= 1024;
+ mod = 'M';
+ }
+ seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
+ if (spool) {
+ if (spool->max_hpages != -1)
+ seq_printf(m, ",size=%llu",
+ (unsigned long long)spool->max_hpages << hpage_shift);
+ if (spool->min_hpages != -1)
+ seq_printf(m, ",min_size=%llu",
+ (unsigned long long)spool->min_hpages << hpage_shift);
+ }
+ return 0;
+}
+
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
@@ -1019,19 +1059,19 @@ static const struct super_operations hugetlbfs_ops = {
.evict_inode = hugetlbfs_evict_inode,
.statfs = hugetlbfs_statfs,
.put_super = hugetlbfs_put_super,
- .show_options = generic_show_options,
+ .show_options = hugetlbfs_show_options,
};
-enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };
+enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
/*
* Convert size option passed from command line to number of huge pages
* in the pool specified by hstate. Size option could be in bytes
* (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
*/
-static long long
+static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
- int val_type)
+ enum hugetlbfs_size_type val_type)
{
if (val_type == NO_SIZE)
return -1;
@@ -1053,7 +1093,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
substring_t args[MAX_OPT_ARGS];
int option;
unsigned long long max_size_opt = 0, min_size_opt = 0;
- int max_val_type = NO_SIZE, min_val_type = NO_SIZE;
+ enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;
if (!options)
return 0;
@@ -1167,8 +1207,6 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
struct hugetlbfs_config config;
struct hugetlbfs_sb_info *sbinfo;
- save_mount_options(sb, data);
-
config.max_hpages = -1; /* No limit on size by default */
config.nr_inodes = -1; /* No limit on number of inodes by default */
config.uid = current_fsuid();
@@ -1189,6 +1227,10 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
sbinfo->max_inodes = config.nr_inodes;
sbinfo->free_inodes = config.nr_inodes;
sbinfo->spool = NULL;
+ sbinfo->uid = config.uid;
+ sbinfo->gid = config.gid;
+ sbinfo->mode = config.mode;
+
/*
* Allocate and initialize subpool if maximum or minimum size is
* specified. Any needed reservations (for minimum size) are taken
diff --git a/fs/iomap.c b/fs/iomap.c
index 173222863aca..039266128b7f 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -610,8 +610,8 @@ iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
loff_t length = size - offset;
loff_t ret;
- /* Nothing to be found beyond the end of the file. */
- if (offset >= size)
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
return -ENXIO;
while (length > 0) {
@@ -656,8 +656,8 @@ iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
loff_t length = size - offset;
loff_t ret;
- /* Nothing to be found beyond the end of the file. */
- if (offset >= size)
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
return -ENXIO;
while (length > 0) {
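
The iomap hunks above close a validation gap: a negative offset used to fall through to the extent walk, but lseek(2) defines SEEK_HOLE/SEEK_DATA outside [0, i_size) as ENXIO. A hedged userspace check of the enforced behaviour (the file name is hypothetical):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("testfile", O_RDONLY);	/* hypothetical file */

		if (fd < 0)
			return 1;
		if (lseek(fd, -1, SEEK_DATA) == (off_t)-1 && errno == ENXIO)
			puts("negative offset rejected with ENXIO, as expected");
		close(fd);
		return 0;
	}
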
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 020ba0936146..217a5e7815da 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -23,6 +23,7 @@
#include <linux/parser.h>
#include <linux/mpage.h>
#include <linux/user_namespace.h>
+#include <linux/seq_file.h>
#include "isofs.h"
#include "zisofs.h"
@@ -57,6 +58,7 @@ static void isofs_put_super(struct super_block *sb)
static int isofs_read_inode(struct inode *, int relocated);
static int isofs_statfs (struct dentry *, struct kstatfs *);
+static int isofs_show_options(struct seq_file *, struct dentry *);
static struct kmem_cache *isofs_inode_cachep;
@@ -123,7 +125,7 @@ static const struct super_operations isofs_sops = {
.put_super = isofs_put_super,
.statfs = isofs_statfs,
.remount_fs = isofs_remount,
- .show_options = generic_show_options,
+ .show_options = isofs_show_options,
};
@@ -408,7 +410,11 @@ static int parse_options(char *options, struct iso9660_options *popt)
if (match_int(&args[0], &option))
return 0;
n = option;
- if (n > 99)
+ /*
+ * Track numbers are supposed to be in the range 1-99; the
+ * mount option starts indexing at 0.
+ */
+ if (n >= 99)
return 0;
popt->session = n + 1;
break;
@@ -473,6 +479,48 @@ static int parse_options(char *options, struct iso9660_options *popt)
}
/*
+ * Display the mount options in /proc/mounts.
+ */
+static int isofs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct isofs_sb_info *sbi = ISOFS_SB(root->d_sb);
+
+ if (!sbi->s_rock) seq_puts(m, ",norock");
+ else if (!sbi->s_joliet_level) seq_puts(m, ",nojoliet");
+ if (sbi->s_cruft) seq_puts(m, ",cruft");
+ if (sbi->s_hide) seq_puts(m, ",hide");
+ if (sbi->s_nocompress) seq_puts(m, ",nocompress");
+ if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm");
+ if (sbi->s_showassoc) seq_puts(m, ",showassoc");
+ if (sbi->s_utf8) seq_puts(m, ",utf8");
+
+ if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check);
+ if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping);
+ if (sbi->s_session != 255) seq_printf(m, ",session=%u", sbi->s_session - 1);
+ if (sbi->s_sbsector != -1) seq_printf(m, ",sbsector=%u", sbi->s_sbsector);
+
+ if (root->d_sb->s_blocksize != 1024)
+ seq_printf(m, ",blocksize=%lu", root->d_sb->s_blocksize);
+
+ if (sbi->s_uid_set)
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, sbi->s_uid));
+ if (sbi->s_gid_set)
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, sbi->s_gid));
+
+ if (sbi->s_dmode != ISOFS_INVALID_MODE)
+ seq_printf(m, ",dmode=%o", sbi->s_dmode);
+ if (sbi->s_fmode != ISOFS_INVALID_MODE)
+ seq_printf(m, ",fmode=%o", sbi->s_fmode);
+
+ if (sbi->s_nls_iocharset &&
+ strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
+ seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
+ return 0;
+}
+
+/*
* check whether the driver can tell the multi session redirection value
*
* don't change this if you don't know what you're doing, please!
@@ -499,7 +547,7 @@ static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
vol_desc_start=0;
ms_info.addr_format=CDROM_LBA;
- if(session >= 0 && session <= 99) {
+ if (session > 0) {
struct cdrom_tocentry Te;
Te.cdte_track=session;
Te.cdte_format=CDROM_LBA;
@@ -583,8 +631,6 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
int table, error = -EINVAL;
unsigned int vol_desc_start;
- save_mount_options(s, data);
-
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
@@ -605,6 +651,8 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
opt.blocksize = sb_min_blocksize(s, opt.blocksize);
sbi->s_high_sierra = 0; /* default is iso9660 */
+ sbi->s_session = opt.session;
+ sbi->s_sbsector = opt.sbsector;
vol_desc_start = (opt.sbsector != -1) ?
opt.sbsector : isofs_get_last_session(s,opt.session);
@@ -911,6 +959,7 @@ root_found:
table += 2;
if (opt.check == 'r')
table++;
+ sbi->s_check = opt.check;
if (table)
s->s_d_op = &isofs_dentry_ops[table - 1];
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 0ac4c1f73fbd..133a456b0425 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -36,8 +36,11 @@ struct isofs_sb_info {
unsigned long s_max_size;
int s_rock_offset; /* offset of SUSP fields within SU area */
+ s32 s_sbsector;
unsigned char s_joliet_level;
unsigned char s_mapping;
+ unsigned char s_check;
+ unsigned char s_session;
unsigned int s_high_sierra:1;
unsigned int s_rock:2;
unsigned int s_utf8:1;
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index d3e40db28930..c349fc0f9b80 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -381,8 +381,9 @@ static void encode_nlm4_lock(struct xdr_stream *xdr,
*/
static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -402,8 +403,9 @@ static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
*/
static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -424,8 +426,9 @@ static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
*/
static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -442,8 +445,9 @@ static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
*/
static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -458,8 +462,10 @@ static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
*/
static void nlm4_xdr_enc_res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_res *result)
+ const void *data)
{
+ const struct nlm_res *result = data;
+
encode_cookie(xdr, &result->cookie);
encode_nlm4_stat(xdr, result->status);
}
@@ -479,8 +485,10 @@ static void nlm4_xdr_enc_res(struct rpc_rqst *req,
*/
static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_res *result)
+ const void *data)
{
+ const struct nlm_res *result = data;
+
encode_cookie(xdr, &result->cookie);
encode_nlm4_stat(xdr, result->status);
if (result->status == nlm_lck_denied)
@@ -525,8 +533,9 @@ out:
static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nlm_res *result)
+ void *data)
{
+ struct nlm_res *result = data;
int error;
error = decode_cookie(xdr, &result->cookie);
@@ -545,8 +554,9 @@ out:
*/
static int nlm4_xdr_dec_res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nlm_res *result)
+ void *data)
{
+ struct nlm_res *result = data;
int error;
error = decode_cookie(xdr, &result->cookie);
@@ -566,15 +576,15 @@ out:
#define PROC(proc, argtype, restype) \
[NLMPROC_##proc] = { \
.p_proc = NLMPROC_##proc, \
- .p_encode = (kxdreproc_t)nlm4_xdr_enc_##argtype, \
- .p_decode = (kxdrdproc_t)nlm4_xdr_dec_##restype, \
+ .p_encode = nlm4_xdr_enc_##argtype, \
+ .p_decode = nlm4_xdr_dec_##restype, \
.p_arglen = NLM4_##argtype##_sz, \
.p_replen = NLM4_##restype##_sz, \
.p_statidx = NLMPROC_##proc, \
.p_name = #proc, \
}
-static struct rpc_procinfo nlm4_procedures[] = {
+static const struct rpc_procinfo nlm4_procedures[] = {
PROC(TEST, testargs, testres),
PROC(LOCK, lockargs, res),
PROC(CANCEL, cancargs, res),
@@ -592,8 +602,10 @@ static struct rpc_procinfo nlm4_procedures[] = {
PROC(GRANTED_RES, res, norep),
};
+static unsigned int nlm_version4_counts[ARRAY_SIZE(nlm4_procedures)];
const struct rpc_version nlm_version4 = {
.number = 4,
.nrprocs = ARRAY_SIZE(nlm4_procedures),
.procs = nlm4_procedures,
+ .counts = nlm_version4_counts,
};
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index 3e9f7874b975..3b4724a6c4ee 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -374,8 +374,9 @@ static void encode_nlm_lock(struct xdr_stream *xdr,
*/
static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -395,8 +396,9 @@ static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
*/
static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -417,8 +419,9 @@ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
*/
static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -435,8 +438,9 @@ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
*/
static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_args *args)
+ const void *data)
{
+ const struct nlm_args *args = data;
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
@@ -451,8 +455,10 @@ static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
*/
static void nlm_xdr_enc_res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_res *result)
+ const void *data)
{
+ const struct nlm_res *result = data;
+
encode_cookie(xdr, &result->cookie);
encode_nlm_stat(xdr, result->status);
}
@@ -479,8 +485,10 @@ static void encode_nlm_testrply(struct xdr_stream *xdr,
static void nlm_xdr_enc_testres(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nlm_res *result)
+ const void *data)
{
+ const struct nlm_res *result = data;
+
encode_cookie(xdr, &result->cookie);
encode_nlm_stat(xdr, result->status);
encode_nlm_testrply(xdr, result);
@@ -523,8 +531,9 @@ out:
static int nlm_xdr_dec_testres(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nlm_res *result)
+ void *data)
{
+ struct nlm_res *result = data;
int error;
error = decode_cookie(xdr, &result->cookie);
@@ -543,8 +552,9 @@ out:
*/
static int nlm_xdr_dec_res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nlm_res *result)
+ void *data)
{
+ struct nlm_res *result = data;
int error;
error = decode_cookie(xdr, &result->cookie);
@@ -564,15 +574,15 @@ out:
#define PROC(proc, argtype, restype) \
[NLMPROC_##proc] = { \
.p_proc = NLMPROC_##proc, \
- .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \
- .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \
+ .p_encode = nlm_xdr_enc_##argtype, \
+ .p_decode = nlm_xdr_dec_##restype, \
.p_arglen = NLM_##argtype##_sz, \
.p_replen = NLM_##restype##_sz, \
.p_statidx = NLMPROC_##proc, \
.p_name = #proc, \
}
-static struct rpc_procinfo nlm_procedures[] = {
+static const struct rpc_procinfo nlm_procedures[] = {
PROC(TEST, testargs, testres),
PROC(LOCK, lockargs, res),
PROC(CANCEL, cancargs, res),
@@ -590,16 +600,20 @@ static struct rpc_procinfo nlm_procedures[] = {
PROC(GRANTED_RES, res, norep),
};
+static unsigned int nlm_version1_counts[ARRAY_SIZE(nlm_procedures)];
static const struct rpc_version nlm_version1 = {
- .number = 1,
- .nrprocs = ARRAY_SIZE(nlm_procedures),
- .procs = nlm_procedures,
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nlm_procedures),
+ .procs = nlm_procedures,
+ .counts = nlm_version1_counts,
};
+static unsigned int nlm_version3_counts[ARRAY_SIZE(nlm_procedures)];
static const struct rpc_version nlm_version3 = {
- .number = 3,
- .nrprocs = ARRAY_SIZE(nlm_procedures),
- .procs = nlm_procedures,
+ .number = 3,
+ .nrprocs = ARRAY_SIZE(nlm_procedures),
+ .procs = nlm_procedures,
+ .counts = nlm_version3_counts,
};
static const struct rpc_version *nlm_versions[] = {
@@ -613,9 +627,9 @@ static const struct rpc_version *nlm_versions[] = {
static struct rpc_stat nlm_rpc_stats;
const struct rpc_program nlm_program = {
- .name = "lockd",
- .number = NLM_PROGRAM,
- .nrvers = ARRAY_SIZE(nlm_versions),
- .version = nlm_versions,
- .stats = &nlm_rpc_stats,
+ .name = "lockd",
+ .number = NLM_PROGRAM,
+ .nrvers = ARRAY_SIZE(nlm_versions),
+ .version = nlm_versions,
+ .stats = &nlm_rpc_stats,
};
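
The lockd conversions above (continued in mon.c below) replace function-pointer casts such as (kxdreproc_t)nlm_xdr_enc_testargs with callbacks whose prototypes match kxdreproc_t/kxdrdproc_t exactly, recovering the real argument type inside the body; calling through a cast-mismatched function pointer is undefined behaviour and defeats control-flow-integrity checking. The shape of the conversion, with hypothetical names:

	#include <linux/sunrpc/xdr.h>
	#include <linux/sunrpc/xprt.h>

	struct example_args {			/* hypothetical */
		u32 value;
	};

	/* matches kxdreproc_t exactly, so the table needs no cast */
	static void example_xdr_enc(struct rpc_rqst *req, struct xdr_stream *xdr,
				    const void *data)
	{
		const struct example_args *args = data;
		__be32 *p = xdr_reserve_space(xdr, 4);

		if (p)
			*p = cpu_to_be32(args->value);
	}
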
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 19166d4a8d31..9d8166c39c54 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -476,22 +476,23 @@ static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
}
static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nsm_args *argp)
+ const void *argp)
{
encode_mon_id(xdr, argp);
encode_priv(xdr, argp);
}
static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nsm_args *argp)
+ const void *argp)
{
encode_mon_id(xdr, argp);
}
static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nsm_res *resp)
+ void *data)
{
+ struct nsm_res *resp = data;
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4);
@@ -507,8 +508,9 @@ static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nsm_res *resp)
+ void *data)
{
+ struct nsm_res *resp = data;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
@@ -529,11 +531,11 @@ static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
#define SM_monres_sz 2
#define SM_unmonres_sz 1
-static struct rpc_procinfo nsm_procedures[] = {
+static const struct rpc_procinfo nsm_procedures[] = {
[NSMPROC_MON] = {
.p_proc = NSMPROC_MON,
- .p_encode = (kxdreproc_t)nsm_xdr_enc_mon,
- .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat_res,
+ .p_encode = nsm_xdr_enc_mon,
+ .p_decode = nsm_xdr_dec_stat_res,
.p_arglen = SM_mon_sz,
.p_replen = SM_monres_sz,
.p_statidx = NSMPROC_MON,
@@ -541,8 +543,8 @@ static struct rpc_procinfo nsm_procedures[] = {
},
[NSMPROC_UNMON] = {
.p_proc = NSMPROC_UNMON,
- .p_encode = (kxdreproc_t)nsm_xdr_enc_unmon,
- .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat,
+ .p_encode = nsm_xdr_enc_unmon,
+ .p_decode = nsm_xdr_dec_stat,
.p_arglen = SM_mon_id_sz,
.p_replen = SM_unmonres_sz,
.p_statidx = NSMPROC_UNMON,
@@ -550,10 +552,12 @@ static struct rpc_procinfo nsm_procedures[] = {
},
};
+static unsigned int nsm_version1_counts[ARRAY_SIZE(nsm_procedures)];
static const struct rpc_version nsm_version1 = {
- .number = 1,
- .nrprocs = ARRAY_SIZE(nsm_procedures),
- .procs = nsm_procedures
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nsm_procedures),
+ .procs = nsm_procedures,
+ .counts = nsm_version1_counts,
};
static const struct rpc_version *nsm_version[] = {
@@ -563,9 +567,9 @@ static const struct rpc_version *nsm_version[] = {
static struct rpc_stat nsm_stats;
static const struct rpc_program nsm_program = {
- .name = "statd",
- .number = NSM_PROGRAM,
- .nrvers = ARRAY_SIZE(nsm_version),
- .version = nsm_version,
- .stats = &nsm_stats
+ .name = "statd",
+ .number = NSM_PROGRAM,
+ .nrvers = ARRAY_SIZE(nsm_version),
+ .version = nsm_version,
+ .stats = &nsm_stats
};
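
The mon.c hunks also show the second recurring change: the (kxdreproc_t) and (kxdrdproc_t) function-pointer casts are gone. Calling a function through a pointer of an incompatible type is undefined behaviour in C, so the XDR routines are changed to accept void pointers and to cast the object pointer inside the body, which is well defined. A hedged sketch of the idea, using hypothetical userspace types:

#include <stdio.h>

struct mon_args { int state; };

/* generic signature the table stores: no function-pointer cast needed */
typedef void (*encode_fn)(const void *data);

static void enc_mon(const void *data)
{
	const struct mon_args *argp = data;	/* cast the object, not the function */

	printf("state=%d\n", argp->state);
}

int main(void)
{
	encode_fn enc = enc_mon;
	struct mon_args a = { .state = 3 };

	enc(&a);
	return 0;
}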
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5d481e8a1b5d..726b6cecf430 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -739,27 +739,33 @@ module_exit(exit_nlm);
/*
* Define NLM program and procedures
*/
-static struct svc_version nlmsvc_version1 = {
- .vs_vers = 1,
- .vs_nproc = 17,
- .vs_proc = nlmsvc_procedures,
- .vs_xdrsize = NLMSVC_XDRSIZE,
+static unsigned int nlmsvc_version1_count[17];
+static const struct svc_version nlmsvc_version1 = {
+ .vs_vers = 1,
+ .vs_nproc = 17,
+ .vs_proc = nlmsvc_procedures,
+ .vs_count = nlmsvc_version1_count,
+ .vs_xdrsize = NLMSVC_XDRSIZE,
};
-static struct svc_version nlmsvc_version3 = {
- .vs_vers = 3,
- .vs_nproc = 24,
- .vs_proc = nlmsvc_procedures,
- .vs_xdrsize = NLMSVC_XDRSIZE,
+static unsigned int nlmsvc_version3_count[24];
+static const struct svc_version nlmsvc_version3 = {
+ .vs_vers = 3,
+ .vs_nproc = 24,
+ .vs_proc = nlmsvc_procedures,
+ .vs_count = nlmsvc_version3_count,
+ .vs_xdrsize = NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
-static struct svc_version nlmsvc_version4 = {
- .vs_vers = 4,
- .vs_nproc = 24,
- .vs_proc = nlmsvc_procedures4,
- .vs_xdrsize = NLMSVC_XDRSIZE,
+static unsigned int nlmsvc_version4_count[24];
+static const struct svc_version nlmsvc_version4 = {
+ .vs_vers = 4,
+ .vs_nproc = 24,
+ .vs_proc = nlmsvc_procedures4,
+ .vs_count = nlmsvc_version4_count,
+ .vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
-static struct svc_version * nlmsvc_version[] = {
+static const struct svc_version *nlmsvc_version[] = {
[1] = &nlmsvc_version1,
[3] = &nlmsvc_version3,
#ifdef CONFIG_LOCKD_V4
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 09c576f26c7b..82925f17ec45 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -62,7 +62,7 @@ no_locks:
* NULL: Test for presence of service
*/
static __be32
-nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nlm4svc_proc_null(struct svc_rqst *rqstp)
{
dprintk("lockd: NULL called\n");
return rpc_success;
@@ -72,9 +72,9 @@ nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
* TEST: Check for conflicting lock
*/
static __be32
-nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+__nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
__be32 rc = rpc_success;
@@ -99,9 +99,15 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
}
static __be32
-nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlm4svc_proc_test(struct svc_rqst *rqstp)
{
+ return __nlm4svc_proc_test(rqstp, rqstp->rq_resp);
+}
+
+static __be32
+__nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
+{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
__be32 rc = rpc_success;
@@ -141,9 +147,15 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
}
static __be32
-nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlm4svc_proc_lock(struct svc_rqst *rqstp)
+{
+ return __nlm4svc_proc_lock(rqstp, rqstp->rq_resp);
+}
+
+static __be32
+__nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
@@ -170,13 +182,19 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
+static __be32
+nlm4svc_proc_cancel(struct svc_rqst *rqstp)
+{
+ return __nlm4svc_proc_cancel(rqstp, rqstp->rq_resp);
+}
+
/*
* UNLOCK: release a lock
*/
static __be32
-nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+__nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
@@ -203,14 +221,21 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
+static __be32
+nlm4svc_proc_unlock(struct svc_rqst *rqstp)
+{
+ return __nlm4svc_proc_unlock(rqstp, rqstp->rq_resp);
+}
+
/*
* GRANTED: A server calls us to tell that a process' lock request
* was granted
*/
static __be32
-nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+__nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+
resp->cookie = argp->cookie;
dprintk("lockd: GRANTED called\n");
@@ -219,6 +244,12 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
+static __be32
+nlm4svc_proc_granted(struct svc_rqst *rqstp)
+{
+ return __nlm4svc_proc_granted(rqstp, rqstp->rq_resp);
+}
+
/*
* This is the generic lockd callback for async RPC calls
*/
@@ -243,9 +274,10 @@ static const struct rpc_call_ops nlm4svc_callback_ops = {
* because we send the callback before the reply proper. I hope this
* doesn't break any clients.
*/
-static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
- __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *))
+static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc,
+ __be32 (*func)(struct svc_rqst *, struct nlm_res *))
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_rqst *call;
__be32 stat;
@@ -261,7 +293,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
if (call == NULL)
return rpc_system_err;
- stat = func(rqstp, argp, &call->a_res);
+ stat = func(rqstp, &call->a_res);
if (stat != 0) {
nlmsvc_release_call(call);
return stat;
@@ -273,48 +305,44 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
return rpc_success;
}
-static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: TEST_MSG called\n");
- return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test);
+ return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, __nlm4svc_proc_test);
}
-static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: LOCK_MSG called\n");
- return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock);
+ return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, __nlm4svc_proc_lock);
}
-static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: CANCEL_MSG called\n");
- return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel);
+ return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, __nlm4svc_proc_cancel);
}
-static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: UNLOCK_MSG called\n");
- return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock);
+ return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, __nlm4svc_proc_unlock);
}
-static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: GRANTED_MSG called\n");
- return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlm4svc_proc_granted);
+ return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, __nlm4svc_proc_granted);
}
/*
* SHARE: create a DOS share or alter existing share.
*/
static __be32
-nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlm4svc_proc_share(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+ struct nlm_res *resp = rqstp->rq_resp;
struct nlm_host *host;
struct nlm_file *file;
@@ -345,9 +373,10 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
* UNSHARE: Release a DOS share.
*/
static __be32
-nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlm4svc_proc_unshare(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+ struct nlm_res *resp = rqstp->rq_resp;
struct nlm_host *host;
struct nlm_file *file;
@@ -378,22 +407,23 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
* NM_LOCK: Create an unmonitored lock
*/
static __be32
-nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlm4svc_proc_nm_lock(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+
dprintk("lockd: NM_LOCK called\n");
argp->monitor = 0; /* just clean the monitor flag */
- return nlm4svc_proc_lock(rqstp, argp, resp);
+ return nlm4svc_proc_lock(rqstp);
}
/*
* FREE_ALL: Release all locks and shares held by client
*/
static __be32
-nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+nlm4svc_proc_free_all(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
/* Obtain client */
@@ -409,9 +439,10 @@ nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
* SM_NOTIFY: private callback from statd (not part of official NLM proto)
*/
static __be32
-nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
- void *resp)
+nlm4svc_proc_sm_notify(struct svc_rqst *rqstp)
{
+ struct nlm_reboot *argp = rqstp->rq_argp;
+
dprintk("lockd: SM_NOTIFY called\n");
if (!nlm_privileged_requester(rqstp)) {
@@ -429,9 +460,10 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
* client sent a GRANTED_RES, let's remove the associated block
*/
static __be32
-nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
- void *resp)
+nlm4svc_proc_granted_res(struct svc_rqst *rqstp)
{
+ struct nlm_res *argp = rqstp->rq_argp;
+
if (!nlmsvc_ops)
return rpc_success;
@@ -463,9 +495,9 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
struct nlm_void { int dummy; };
#define PROC(name, xargt, xrest, argt, rest, respsize) \
- { .pc_func = (svc_procfunc) nlm4svc_proc_##name, \
- .pc_decode = (kxdrproc_t) nlm4svc_decode_##xargt, \
- .pc_encode = (kxdrproc_t) nlm4svc_encode_##xrest, \
+ { .pc_func = nlm4svc_proc_##name, \
+ .pc_decode = nlm4svc_decode_##xargt, \
+ .pc_encode = nlm4svc_encode_##xrest, \
.pc_release = NULL, \
.pc_argsize = sizeof(struct nlm_##argt), \
.pc_ressize = sizeof(struct nlm_##rest), \
@@ -475,7 +507,7 @@ struct nlm_void { int dummy; };
#define No (1+1024/4) /* netobj */
#define St 1 /* status */
#define Rg 4 /* range (offset + length) */
-struct svc_procedure nlmsvc_procedures4[] = {
+const struct svc_procedure nlmsvc_procedures4[] = {
PROC(null, void, void, void, void, 1),
PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg),
PROC(lock, lockargs, res, args, res, Ck+St),
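
The __nlm4svc_proc_*() / nlm4svc_proc_*() split above exists because the dispatcher now hands a procedure only the request, while the async *_MSG procedures still need to point the same core logic at a private nlm_res inside their callback structure rather than at rqstp->rq_resp. A compact model of the pattern (hypothetical names, userspace types):

#include <stdio.h>

struct svc_rqst { void *rq_argp; void *rq_resp; };
struct nlm_res { int status; };

/* core: writes its result wherever the caller says */
static int __proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
{
	(void)rqstp;		/* the real core reads rqstp->rq_argp here */
	resp->status = 0;
	return 0;
}

/* synchronous entry point: result is the RPC reply */
static int proc_test(struct svc_rqst *rqstp)
{
	return __proc_test(rqstp, rqstp->rq_resp);
}

/* async *_MSG entry point: result goes into a private callback buffer */
static int proc_test_msg(struct svc_rqst *rqstp)
{
	struct nlm_res callback_res;

	return __proc_test(rqstp, &callback_res);
}

int main(void)
{
	struct nlm_res reply = { -1 };
	struct svc_rqst rq = { .rq_argp = NULL, .rq_resp = &reply };

	proc_test(&rq);
	proc_test_msg(&rq);
	printf("reply status=%d\n", reply.status);
	return 0;
}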
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index fb26b9f522e7..07915162581d 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -92,7 +92,7 @@ no_locks:
* NULL: Test for presence of service
*/
static __be32
-nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nlmsvc_proc_null(struct svc_rqst *rqstp)
{
dprintk("lockd: NULL called\n");
return rpc_success;
@@ -102,9 +102,9 @@ nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
* TEST: Check for conflicting lock
*/
static __be32
-nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+__nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
__be32 rc = rpc_success;
@@ -130,9 +130,15 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
}
static __be32
-nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlmsvc_proc_test(struct svc_rqst *rqstp)
{
+ return __nlmsvc_proc_test(rqstp, rqstp->rq_resp);
+}
+
+static __be32
+__nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
+{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
__be32 rc = rpc_success;
@@ -172,9 +178,15 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
}
static __be32
-nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlmsvc_proc_lock(struct svc_rqst *rqstp)
+{
+ return __nlmsvc_proc_lock(rqstp, rqstp->rq_resp);
+}
+
+static __be32
+__nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
struct net *net = SVC_NET(rqstp);
@@ -202,13 +214,19 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
+static __be32
+nlmsvc_proc_cancel(struct svc_rqst *rqstp)
+{
+ return __nlmsvc_proc_cancel(rqstp, rqstp->rq_resp);
+}
+
/*
* UNLOCK: release a lock
*/
static __be32
-nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+__nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
struct net *net = SVC_NET(rqstp);
@@ -236,14 +254,21 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
+static __be32
+nlmsvc_proc_unlock(struct svc_rqst *rqstp)
+{
+ return __nlmsvc_proc_unlock(rqstp, rqstp->rq_resp);
+}
+
/*
* GRANTED: A server calls us to tell that a process' lock request
* was granted
*/
static __be32
-nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+__nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_res *resp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+
resp->cookie = argp->cookie;
dprintk("lockd: GRANTED called\n");
@@ -252,6 +277,12 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
+static __be32
+nlmsvc_proc_granted(struct svc_rqst *rqstp)
+{
+ return __nlmsvc_proc_granted(rqstp, rqstp->rq_resp);
+}
+
/*
* This is the generic lockd callback for async RPC calls
*/
@@ -284,9 +315,10 @@ static const struct rpc_call_ops nlmsvc_callback_ops = {
* because we send the callback before the reply proper. I hope this
* doesn't break any clients.
*/
-static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
- __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *))
+static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc,
+ __be32 (*func)(struct svc_rqst *, struct nlm_res *))
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_rqst *call;
__be32 stat;
@@ -302,7 +334,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
if (call == NULL)
return rpc_system_err;
- stat = func(rqstp, argp, &call->a_res);
+ stat = func(rqstp, &call->a_res);
if (stat != 0) {
nlmsvc_release_call(call);
return stat;
@@ -314,50 +346,46 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
return rpc_success;
}
-static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: TEST_MSG called\n");
- return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test);
+ return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, __nlmsvc_proc_test);
}
-static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: LOCK_MSG called\n");
- return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock);
+ return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, __nlmsvc_proc_lock);
}
-static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: CANCEL_MSG called\n");
- return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel);
+ return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, __nlmsvc_proc_cancel);
}
static __be32
-nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: UNLOCK_MSG called\n");
- return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock);
+ return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, __nlmsvc_proc_unlock);
}
static __be32
-nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+nlmsvc_proc_granted_msg(struct svc_rqst *rqstp)
{
dprintk("lockd: GRANTED_MSG called\n");
- return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted);
+ return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, __nlmsvc_proc_granted);
}
/*
* SHARE: create a DOS share or alter existing share.
*/
static __be32
-nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlmsvc_proc_share(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+ struct nlm_res *resp = rqstp->rq_resp;
struct nlm_host *host;
struct nlm_file *file;
@@ -388,9 +416,10 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
* UNSHARE: Release a DOS share.
*/
static __be32
-nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlmsvc_proc_unshare(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+ struct nlm_res *resp = rqstp->rq_resp;
struct nlm_host *host;
struct nlm_file *file;
@@ -421,22 +450,23 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
* NM_LOCK: Create an unmonitored lock
*/
static __be32
-nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
- struct nlm_res *resp)
+nlmsvc_proc_nm_lock(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+
dprintk("lockd: NM_LOCK called\n");
argp->monitor = 0; /* just clean the monitor flag */
- return nlmsvc_proc_lock(rqstp, argp, resp);
+ return nlmsvc_proc_lock(rqstp);
}
/*
* FREE_ALL: Release all locks and shares held by client
*/
static __be32
-nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
- void *resp)
+nlmsvc_proc_free_all(struct svc_rqst *rqstp)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
/* Obtain client */
@@ -452,9 +482,10 @@ nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
* SM_NOTIFY: private callback from statd (not part of official NLM proto)
*/
static __be32
-nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
- void *resp)
+nlmsvc_proc_sm_notify(struct svc_rqst *rqstp)
{
+ struct nlm_reboot *argp = rqstp->rq_argp;
+
dprintk("lockd: SM_NOTIFY called\n");
if (!nlm_privileged_requester(rqstp)) {
@@ -472,9 +503,10 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
* client sent a GRANTED_RES, let's remove the associated block
*/
static __be32
-nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
- void *resp)
+nlmsvc_proc_granted_res(struct svc_rqst *rqstp)
{
+ struct nlm_res *argp = rqstp->rq_argp;
+
if (!nlmsvc_ops)
return rpc_success;
@@ -505,9 +537,9 @@ nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
struct nlm_void { int dummy; };
#define PROC(name, xargt, xrest, argt, rest, respsize) \
- { .pc_func = (svc_procfunc) nlmsvc_proc_##name, \
- .pc_decode = (kxdrproc_t) nlmsvc_decode_##xargt, \
- .pc_encode = (kxdrproc_t) nlmsvc_encode_##xrest, \
+ { .pc_func = nlmsvc_proc_##name, \
+ .pc_decode = nlmsvc_decode_##xargt, \
+ .pc_encode = nlmsvc_encode_##xrest, \
.pc_release = NULL, \
.pc_argsize = sizeof(struct nlm_##argt), \
.pc_ressize = sizeof(struct nlm_##rest), \
@@ -519,7 +551,7 @@ struct nlm_void { int dummy; };
#define No (1+1024/4) /* Net Obj */
#define Rg 2 /* range - offset + size */
-struct svc_procedure nlmsvc_procedures[] = {
+const struct svc_procedure nlmsvc_procedures[] = {
PROC(null, void, void, void, void, 1),
PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg),
PROC(lock, lockargs, res, args, res, Ck+St),
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 5b651daad518..442bbd0b0b29 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -182,8 +182,9 @@ nlm_encode_testres(__be32 *p, struct nlm_res *resp)
* First, the server side XDR functions
*/
int
-nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
u32 exclusive;
if (!(p = nlm_decode_cookie(p, &argp->cookie)))
@@ -199,16 +200,19 @@ nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_resp;
+
if (!(p = nlm_encode_testres(p, resp)))
return 0;
return xdr_ressize_check(rqstp, p);
}
int
-nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
u32 exclusive;
if (!(p = nlm_decode_cookie(p, &argp->cookie)))
@@ -227,8 +231,9 @@ nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
u32 exclusive;
if (!(p = nlm_decode_cookie(p, &argp->cookie)))
@@ -243,8 +248,10 @@ nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+
if (!(p = nlm_decode_cookie(p, &argp->cookie))
|| !(p = nlm_decode_lock(p, &argp->lock)))
return 0;
@@ -253,8 +260,9 @@ nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
memset(lock, 0, sizeof(*lock));
@@ -274,8 +282,10 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_resp;
+
if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
@@ -284,8 +294,10 @@ nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
}
int
-nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_resp;
+
if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
@@ -293,8 +305,9 @@ nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
}
int
-nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
+nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
if (!(p = xdr_decode_string_inplace(p, &lock->caller,
@@ -305,8 +318,10 @@ nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
}
int
-nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
+nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_reboot *argp = rqstp->rq_argp;
+
if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
@@ -316,8 +331,10 @@ nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
}
int
-nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_argp;
+
if (!(p = nlm_decode_cookie(p, &resp->cookie)))
return 0;
resp->status = *p++;
@@ -325,13 +342,13 @@ nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
}
int
-nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_argsize_check(rqstp, p);
}
int
-nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index dfa4789cd460..2a0cd5679c49 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -179,8 +179,9 @@ nlm4_encode_testres(__be32 *p, struct nlm_res *resp)
* First, the server side XDR functions
*/
int
-nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
u32 exclusive;
if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
@@ -196,16 +197,19 @@ nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_resp;
+
if (!(p = nlm4_encode_testres(p, resp)))
return 0;
return xdr_ressize_check(rqstp, p);
}
int
-nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
u32 exclusive;
if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
@@ -224,8 +228,9 @@ nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
u32 exclusive;
if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
@@ -240,8 +245,10 @@ nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
+
if (!(p = nlm4_decode_cookie(p, &argp->cookie))
|| !(p = nlm4_decode_lock(p, &argp->lock)))
return 0;
@@ -250,8 +257,9 @@ nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
memset(lock, 0, sizeof(*lock));
@@ -271,8 +279,10 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
}
int
-nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_resp;
+
if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
@@ -281,8 +291,10 @@ nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
}
int
-nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_resp;
+
if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
@@ -290,8 +302,9 @@ nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
}
int
-nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
+nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
if (!(p = xdr_decode_string_inplace(p, &lock->caller,
@@ -302,8 +315,10 @@ nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
}
int
-nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
+nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_reboot *argp = rqstp->rq_argp;
+
if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
@@ -313,8 +328,10 @@ nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp
}
int
-nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nlm_res *resp = rqstp->rq_argp;
+
if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
return 0;
resp->status = *p++;
@@ -322,13 +339,13 @@ nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
}
int
-nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_argsize_check(rqstp, p);
}
int
-nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
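
A minimal model (hypothetical userspace types) of the XDR conversion in xdr.c and xdr4.c above: the server-side helpers take only the request and the cursor, and fetch their buffer from the request itself. One subtlety worth noting: for the NLM *_RES procedures the peer's result is this server's argument, which is why nlmsvc_decode_res() and nlm4svc_decode_res() fill a struct nlm_res taken from rq_argp rather than rq_resp.

#include <stdio.h>

struct svc_rqst { void *rq_argp; void *rq_resp; };
struct nlm_res { unsigned int status; };

static int decode_res(struct svc_rqst *rqstp, const unsigned int *p)
{
	struct nlm_res *resp = rqstp->rq_argp;	/* argument of this call, not its reply */

	resp->status = *p;
	return 1;
}

int main(void)
{
	struct nlm_res res;
	struct svc_rqst rq = { .rq_argp = &res, .rq_resp = NULL };
	unsigned int wire[] = { 5 };

	decode_res(&rq, wire);
	printf("decoded status=%u\n", res.status);
	return 0;
}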
diff --git a/fs/mount.h b/fs/mount.h
index de45d9e76748..6790767d1883 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -16,7 +16,7 @@ struct mnt_namespace {
u64 event;
unsigned int mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
-};
+} __randomize_layout;
struct mnt_pcp {
int mnt_count;
@@ -69,7 +69,7 @@ struct mount {
struct hlist_head mnt_pins;
struct fs_pin mnt_umount;
struct dentry *mnt_ex_mountpoint;
-};
+} __randomize_layout;
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
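
mount.h (and nameidata in the fs/namei.c hunks just below) picks up __randomize_layout, the marker used by the structure-layout randomization GCC plugin to shuffle a sensitive structure's field order at build time. As a rough sketch of the convention, and an assumption about the compiler-header definition rather than a quote of it, the macro reduces to an attribute when the plugin is enabled and to nothing otherwise:

#ifdef CONFIG_GCC_PLUGIN_RANDSTRUCT
#define __randomize_layout __attribute__((randomize_layout))	/* plugin shuffles fields */
#else
#define __randomize_layout					/* no-op without the plugin */
#endif

struct example {
	int a;
	void *b;
} __randomize_layout;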
diff --git a/fs/namei.c b/fs/namei.c
index e0b46eb0e212..ddb6a7c2b3d4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -524,7 +524,7 @@ struct nameidata {
struct inode *link_inode;
unsigned root_seq;
int dfd;
-};
+} __randomize_layout;
static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
@@ -3400,7 +3400,6 @@ out:
struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
{
- static const struct qstr name = QSTR_INIT("/", 1);
struct dentry *child = NULL;
struct inode *dir = dentry->d_inode;
struct inode *inode;
@@ -3414,7 +3413,7 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
if (!dir->i_op->tmpfile)
goto out_err;
error = -ENOMEM;
- child = d_alloc(dentry, &name);
+ child = d_alloc(dentry, &slash_name);
if (unlikely(!child))
goto out_err;
error = dir->i_op->tmpfile(dir, child, mode);
diff --git a/fs/namespace.c b/fs/namespace.c
index 81f934b5d571..f8893dc6a989 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1238,65 +1238,6 @@ struct vfsmount *mnt_clone_internal(const struct path *path)
return &p->mnt;
}
-static inline void mangle(struct seq_file *m, const char *s)
-{
- seq_escape(m, s, " \t\n\\");
-}
-
-/*
- * Simple .show_options callback for filesystems which don't want to
- * implement more complex mount option showing.
- *
- * See also save_mount_options().
- */
-int generic_show_options(struct seq_file *m, struct dentry *root)
-{
- const char *options;
-
- rcu_read_lock();
- options = rcu_dereference(root->d_sb->s_options);
-
- if (options != NULL && options[0]) {
- seq_putc(m, ',');
- mangle(m, options);
- }
- rcu_read_unlock();
-
- return 0;
-}
-EXPORT_SYMBOL(generic_show_options);
-
-/*
- * If filesystem uses generic_show_options(), this function should be
- * called from the fill_super() callback.
- *
- * The .remount_fs callback usually needs to be handled in a special
- * way, to make sure, that previous options are not overwritten if the
- * remount fails.
- *
- * Also note, that if the filesystem's .remount_fs function doesn't
- * reset all options to their default value, but changes only newly
- * given options, then the displayed options will not reflect reality
- * any more.
- */
-void save_mount_options(struct super_block *sb, char *options)
-{
- BUG_ON(sb->s_options);
- rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
-}
-EXPORT_SYMBOL(save_mount_options);
-
-void replace_mount_options(struct super_block *sb, char *options)
-{
- char *old = sb->s_options;
- rcu_assign_pointer(sb->s_options, options);
- if (old) {
- synchronize_rcu();
- kfree(old);
- }
-}
-EXPORT_SYMBOL(replace_mount_options);
-
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
@@ -1657,7 +1598,7 @@ out_unlock:
namespace_unlock();
}
-/*
+/*
* Is the caller allowed to modify his namespace?
*/
static inline bool may_mount(void)
@@ -2211,7 +2152,7 @@ static int do_loopback(struct path *path, const char *old_name,
err = -EINVAL;
if (mnt_ns_loop(old_path.dentry))
- goto out;
+ goto out;
mp = lock_mount(path);
err = PTR_ERR(mp);
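
The deleted generic_show_options() and save_mount_options() worked by stashing a kstrdup() of the original mount-option string in sb->s_options and replaying it at show time; with their removal, a filesystem is expected to report its live state through its own .show_options. A hedged userspace model of the replacement pattern, with a FILE * standing in for the kernel's seq_file:

#include <stdio.h>

struct my_sb_info { int quiet; };

static int my_show_options(FILE *m, const struct my_sb_info *sbi)
{
	if (sbi->quiet)
		fprintf(m, ",quiet");	/* print current state, not a saved string */
	return 0;
}

int main(void)
{
	struct my_sb_info sbi = { .quiet = 1 };

	my_show_options(stdout, &sbi);
	putchar('\n');
	return 0;
}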
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 98f4e5728a67..1fb118902d57 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_NFS_FS) += nfs.o
CFLAGS_nfstrace.o += -I$(src)
nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
io.o direct.o pagelist.o read.o symlink.o unlink.o \
- write.o namespace.o mount_clnt.o nfstrace.o
+ write.o namespace.o mount_clnt.o nfstrace.o export.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 73a1f928226c..34323877ec13 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -439,7 +439,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
/*
* Define NFS4 callback program
*/
-static struct svc_version *nfs4_callback_version[] = {
+static const struct svc_version *nfs4_callback_version[] = {
[1] = &nfs4_callback_version1,
[4] = &nfs4_callback_version4,
};
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index c701c308fac5..3dc54d7cb19c 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -114,8 +114,7 @@ struct cb_sequenceres {
uint32_t csr_target_highestslotid;
};
-extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
- struct cb_sequenceres *res,
+extern __be32 nfs4_callback_sequence(void *argp, void *resp,
struct cb_process_state *cps);
#define RCA4_TYPE_MASK_RDATA_DLG 0
@@ -134,15 +133,13 @@ struct cb_recallanyargs {
uint32_t craa_type_mask;
};
-extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
- void *dummy,
+extern __be32 nfs4_callback_recallany(void *argp, void *resp,
struct cb_process_state *cps);
struct cb_recallslotargs {
uint32_t crsa_target_highest_slotid;
};
-extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
- void *dummy,
+extern __be32 nfs4_callback_recallslot(void *argp, void *resp,
struct cb_process_state *cps);
struct cb_layoutrecallargs {
@@ -159,9 +156,8 @@ struct cb_layoutrecallargs {
};
};
-extern __be32 nfs4_callback_layoutrecall(
- struct cb_layoutrecallargs *args,
- void *dummy, struct cb_process_state *cps);
+extern __be32 nfs4_callback_layoutrecall(void *argp, void *resp,
+ struct cb_process_state *cps);
struct cb_devicenotifyitem {
uint32_t cbd_notify_type;
@@ -175,9 +171,8 @@ struct cb_devicenotifyargs {
struct cb_devicenotifyitem *devs;
};
-extern __be32 nfs4_callback_devicenotify(
- struct cb_devicenotifyargs *args,
- void *dummy, struct cb_process_state *cps);
+extern __be32 nfs4_callback_devicenotify(void *argp, void *resp,
+ struct cb_process_state *cps);
struct cb_notify_lock_args {
struct nfs_fh cbnl_fh;
@@ -185,15 +180,13 @@ struct cb_notify_lock_args {
bool cbnl_valid;
};
-extern __be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args,
- void *dummy,
+extern __be32 nfs4_callback_notify_lock(void *argp, void *resp,
struct cb_process_state *cps);
#endif /* CONFIG_NFS_V4_1 */
extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
-extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
- struct cb_getattrres *res,
+extern __be32 nfs4_callback_getattr(void *argp, void *resp,
struct cb_process_state *cps);
-extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+extern __be32 nfs4_callback_recall(void *argp, void *resp,
struct cb_process_state *cps);
#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 52479f180ea1..5427cdf04c5a 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -19,10 +19,11 @@
#define NFSDBG_FACILITY NFSDBG_CALLBACK
-__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
- struct cb_getattrres *res,
+__be32 nfs4_callback_getattr(void *argp, void *resp,
struct cb_process_state *cps)
{
+ struct cb_getattrargs *args = argp;
+ struct cb_getattrres *res = resp;
struct nfs_delegation *delegation;
struct nfs_inode *nfsi;
struct inode *inode;
@@ -68,9 +69,10 @@ out:
return res->status;
}
-__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+__be32 nfs4_callback_recall(void *argp, void *resp,
struct cb_process_state *cps)
{
+ struct cb_recallargs *args = argp;
struct inode *inode;
__be32 res;
@@ -324,9 +326,10 @@ static u32 do_callback_layoutrecall(struct nfs_client *clp,
return initiate_bulk_draining(clp, args);
}
-__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
- void *dummy, struct cb_process_state *cps)
+__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
+ struct cb_process_state *cps)
{
+ struct cb_layoutrecallargs *args = argp;
u32 res = NFS4ERR_OP_NOT_IN_SESSION;
if (cps->clp)
@@ -345,9 +348,10 @@ static void pnfs_recall_all_layouts(struct nfs_client *clp)
do_callback_layoutrecall(clp, &args);
}
-__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
- void *dummy, struct cb_process_state *cps)
+__be32 nfs4_callback_devicenotify(void *argp, void *resp,
+ struct cb_process_state *cps)
{
+ struct cb_devicenotifyargs *args = argp;
int i;
__be32 res = 0;
struct nfs_client *clp = cps->clp;
@@ -469,10 +473,11 @@ out:
return status;
}
-__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
- struct cb_sequenceres *res,
+__be32 nfs4_callback_sequence(void *argp, void *resp,
struct cb_process_state *cps)
{
+ struct cb_sequenceargs *args = argp;
+ struct cb_sequenceres *res = resp;
struct nfs4_slot_table *tbl;
struct nfs4_slot *slot;
struct nfs_client *clp;
@@ -571,9 +576,10 @@ validate_bitmap_values(unsigned long mask)
return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
-__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
+__be32 nfs4_callback_recallany(void *argp, void *resp,
struct cb_process_state *cps)
{
+ struct cb_recallanyargs *args = argp;
__be32 status;
fmode_t flags = 0;
@@ -606,9 +612,10 @@ out:
}
/* Reduce the fore channel's max_slots to the target value */
-__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
+__be32 nfs4_callback_recallslot(void *argp, void *resp,
struct cb_process_state *cps)
{
+ struct cb_recallslotargs *args = argp;
struct nfs4_slot_table *fc_tbl;
__be32 status;
@@ -631,9 +638,11 @@ out:
return status;
}
-__be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy,
+__be32 nfs4_callback_notify_lock(void *argp, void *resp,
struct cb_process_state *cps)
{
+ struct cb_notify_lock_args *args = argp;
+
if (!cps->clp) /* set in cb_sequence */
return htonl(NFS4ERR_OP_NOT_IN_SESSION);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 390ac9c39c59..681dd642f119 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -43,32 +43,27 @@
/* Internal error code */
#define NFS4ERR_RESOURCE_HDR 11050
-typedef __be32 (*callback_process_op_t)(void *, void *,
- struct cb_process_state *);
-typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
-typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
-
-
struct callback_op {
- callback_process_op_t process_op;
- callback_decode_arg_t decode_args;
- callback_encode_res_t encode_res;
+ __be32 (*process_op)(void *, void *, struct cb_process_state *);
+ __be32 (*decode_args)(struct svc_rqst *, struct xdr_stream *, void *);
+ __be32 (*encode_res)(struct svc_rqst *, struct xdr_stream *,
+ const void *);
long res_maxsize;
};
static struct callback_op callback_ops[];
-static __be32 nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp)
+static __be32 nfs4_callback_null(struct svc_rqst *rqstp)
{
return htonl(NFS4_OK);
}
-static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_argsize_check(rqstp, p);
}
-static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
@@ -184,8 +179,10 @@ static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
return 0;
}
-static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args)
+static __be32 decode_getattr_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr, void *argp)
{
+ struct cb_getattrargs *args = argp;
__be32 status;
status = decode_fh(xdr, &args->fh);
@@ -194,8 +191,10 @@ static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr
return decode_bitmap(xdr, args->bitmap);
}
-static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args)
+static __be32 decode_recall_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr, void *argp)
{
+ struct cb_recallargs *args = argp;
__be32 *p;
__be32 status;
@@ -217,9 +216,9 @@ static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *statei
}
static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
- struct xdr_stream *xdr,
- struct cb_layoutrecallargs *args)
+ struct xdr_stream *xdr, void *argp)
{
+ struct cb_layoutrecallargs *args = argp;
__be32 *p;
__be32 status = 0;
uint32_t iomode;
@@ -262,8 +261,9 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
static
__be32 decode_devicenotify_args(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
- struct cb_devicenotifyargs *args)
+ void *argp)
{
+ struct cb_devicenotifyargs *args = argp;
__be32 *p;
__be32 status = 0;
u32 tmp;
@@ -403,8 +403,9 @@ out:
static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
- struct cb_sequenceargs *args)
+ void *argp)
{
+ struct cb_sequenceargs *args = argp;
__be32 *p;
int i;
__be32 status;
@@ -450,8 +451,9 @@ out_free:
static __be32 decode_recallany_args(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
- struct cb_recallanyargs *args)
+ void *argp)
{
+ struct cb_recallanyargs *args = argp;
uint32_t bitmap[2];
__be32 *p, status;
@@ -469,8 +471,9 @@ static __be32 decode_recallany_args(struct svc_rqst *rqstp,
static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
- struct cb_recallslotargs *args)
+ void *argp)
{
+ struct cb_recallslotargs *args = argp;
__be32 *p;
p = read_buf(xdr, 4);
@@ -510,8 +513,10 @@ static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_arg
return 0;
}
-static __be32 decode_notify_lock_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_notify_lock_args *args)
+static __be32 decode_notify_lock_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr, void *argp)
{
+ struct cb_notify_lock_args *args = argp;
__be32 status;
status = decode_fh(xdr, &args->cbnl_fh);
@@ -641,8 +646,10 @@ static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)
return 0;
}
-static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res)
+static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ const void *resp)
{
+ const struct cb_getattrres *res = resp;
__be32 *savep = NULL;
__be32 status = res->status;
@@ -683,8 +690,9 @@ static __be32 encode_sessionid(struct xdr_stream *xdr,
static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
- const struct cb_sequenceres *res)
+ const void *resp)
{
+ const struct cb_sequenceres *res = resp;
__be32 *p;
__be32 status = res->csr_status;
@@ -871,7 +879,7 @@ encode_hdr:
/*
* Decode, process and encode a COMPOUND
*/
-static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
+static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
{
struct cb_compound_hdr_arg hdr_arg = { 0 };
struct cb_compound_hdr_res hdr_res = { NULL };
@@ -907,7 +915,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(nops, rqstp, &xdr_in,
- argp, &xdr_out, resp, &cps);
+ rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
+ &cps);
nops++;
}
@@ -937,48 +946,46 @@ static struct callback_op callback_ops[] = {
.res_maxsize = CB_OP_HDR_RES_MAXSZ,
},
[OP_CB_GETATTR] = {
- .process_op = (callback_process_op_t)nfs4_callback_getattr,
- .decode_args = (callback_decode_arg_t)decode_getattr_args,
- .encode_res = (callback_encode_res_t)encode_getattr_res,
+ .process_op = nfs4_callback_getattr,
+ .decode_args = decode_getattr_args,
+ .encode_res = encode_getattr_res,
.res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
},
[OP_CB_RECALL] = {
- .process_op = (callback_process_op_t)nfs4_callback_recall,
- .decode_args = (callback_decode_arg_t)decode_recall_args,
+ .process_op = nfs4_callback_recall,
+ .decode_args = decode_recall_args,
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
},
#if defined(CONFIG_NFS_V4_1)
[OP_CB_LAYOUTRECALL] = {
- .process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
- .decode_args =
- (callback_decode_arg_t)decode_layoutrecall_args,
+ .process_op = nfs4_callback_layoutrecall,
+ .decode_args = decode_layoutrecall_args,
.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
},
[OP_CB_NOTIFY_DEVICEID] = {
- .process_op = (callback_process_op_t)nfs4_callback_devicenotify,
- .decode_args =
- (callback_decode_arg_t)decode_devicenotify_args,
+ .process_op = nfs4_callback_devicenotify,
+ .decode_args = decode_devicenotify_args,
.res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ,
},
[OP_CB_SEQUENCE] = {
- .process_op = (callback_process_op_t)nfs4_callback_sequence,
- .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
- .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
+ .process_op = nfs4_callback_sequence,
+ .decode_args = decode_cb_sequence_args,
+ .encode_res = encode_cb_sequence_res,
.res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
},
[OP_CB_RECALL_ANY] = {
- .process_op = (callback_process_op_t)nfs4_callback_recallany,
- .decode_args = (callback_decode_arg_t)decode_recallany_args,
+ .process_op = nfs4_callback_recallany,
+ .decode_args = decode_recallany_args,
.res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
},
[OP_CB_RECALL_SLOT] = {
- .process_op = (callback_process_op_t)nfs4_callback_recallslot,
- .decode_args = (callback_decode_arg_t)decode_recallslot_args,
+ .process_op = nfs4_callback_recallslot,
+ .decode_args = decode_recallslot_args,
.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
},
[OP_CB_NOTIFY_LOCK] = {
- .process_op = (callback_process_op_t)nfs4_callback_notify_lock,
- .decode_args = (callback_decode_arg_t)decode_notify_lock_args,
+ .process_op = nfs4_callback_notify_lock,
+ .decode_args = decode_notify_lock_args,
.res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
},
#endif /* CONFIG_NFS_V4_1 */
@@ -987,36 +994,40 @@ static struct callback_op callback_ops[] = {
/*
* Define NFS4 callback procedures
*/
-static struct svc_procedure nfs4_callback_procedures1[] = {
+static const struct svc_procedure nfs4_callback_procedures1[] = {
[CB_NULL] = {
.pc_func = nfs4_callback_null,
- .pc_decode = (kxdrproc_t)nfs4_decode_void,
- .pc_encode = (kxdrproc_t)nfs4_encode_void,
+ .pc_decode = nfs4_decode_void,
+ .pc_encode = nfs4_encode_void,
.pc_xdrressize = 1,
},
[CB_COMPOUND] = {
.pc_func = nfs4_callback_compound,
- .pc_encode = (kxdrproc_t)nfs4_encode_void,
+ .pc_encode = nfs4_encode_void,
.pc_argsize = 256,
.pc_ressize = 256,
.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
}
};
-struct svc_version nfs4_callback_version1 = {
+static unsigned int nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)];
+const struct svc_version nfs4_callback_version1 = {
.vs_vers = 1,
.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
.vs_proc = nfs4_callback_procedures1,
+ .vs_count = nfs4_callback_count1,
.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
.vs_dispatch = NULL,
.vs_hidden = true,
.vs_need_cong_ctrl = true,
};
-struct svc_version nfs4_callback_version4 = {
+static unsigned int nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)];
+const struct svc_version nfs4_callback_version4 = {
.vs_vers = 4,
.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
.vs_proc = nfs4_callback_procedures1,
+ .vs_count = nfs4_callback_count4,
.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
.vs_dispatch = NULL,
.vs_hidden = true,
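
With the callback_process_op_t/callback_decode_arg_t/callback_encode_res_t typedefs and their casts removed, every callback_ops[] initializer is now checked by the compiler against the exact member type, and the vs_count arrays added to the two svc_versions mirror the per-version statistics change made on the lockd side. A small userspace reduction of the resulting table shape (hypothetical types):

#include <stdio.h>

struct cb_process_state { int clp; };

struct callback_op {
	/* exact pointer types: the initializers below are type-checked */
	int (*process_op)(void *argp, void *resp, struct cb_process_state *cps);
	long res_maxsize;
};

static int cb_getattr(void *argp, void *resp, struct cb_process_state *cps)
{
	(void)argp; (void)resp; (void)cps;
	return 0;
}

static const struct callback_op ops[] = {
	{ .process_op = cb_getattr, .res_maxsize = 16 },
};

int main(void)
{
	struct cb_process_state cps = { 0 };

	printf("status=%d\n", ops[0].process_op(NULL, NULL, &cps));
	return 0;
}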
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2ac00bf4ecf1..5ac484fe0dee 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -151,7 +151,7 @@ struct nfs_cache_array {
struct nfs_cache_array_entry array[0];
};
-typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
+typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
typedef struct {
struct file *file;
struct page *page;
@@ -165,8 +165,8 @@ typedef struct {
unsigned long timestamp;
unsigned long gencount;
unsigned int cache_entry_index;
- unsigned int plus:1;
- unsigned int eof:1;
+ bool plus;
+ bool eof;
} nfs_readdir_descriptor_t;
/*
@@ -355,7 +355,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
if (error == -ENOTSUPP && desc->plus) {
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
- desc->plus = 0;
+ desc->plus = false;
goto again;
}
goto error;
@@ -557,7 +557,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
- if (desc->plus != 0)
+ if (desc->plus)
nfs_prime_dcache(file_dentry(desc->file), entry);
status = nfs_readdir_add_to_array(entry, page);
@@ -860,7 +860,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->ctx = ctx;
desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
- desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
+ desc->plus = nfs_use_readdirplus(inode, ctx);
if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
@@ -885,8 +885,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
nfs_zap_caches(inode);
desc->page_index = 0;
- desc->plus = 0;
- desc->eof = 0;
+ desc->plus = false;
+ desc->eof = false;
continue;
}
if (res < 0)
@@ -1115,11 +1115,13 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
/* Force a full look up iff the parent directory has changed */
if (!nfs_is_exclusive_create(dir, flags) &&
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
-
- if (nfs_lookup_verify_inode(inode, flags)) {
+ error = nfs_lookup_verify_inode(inode, flags);
+ if (error) {
if (flags & LOOKUP_RCU)
return -ECHILD;
- goto out_zap_parent;
+ if (error == -ESTALE)
+ goto out_zap_parent;
+ goto out_error;
}
nfs_advise_use_readdirplus(dir);
goto out_valid;
@@ -1144,8 +1146,10 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- if (error)
+ if (error == -ESTALE || error == -ENOENT)
goto out_bad;
+ if (error)
+ goto out_error;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out_bad;
if ((error = nfs_refresh_inode(inode, fattr)) != 0)
@@ -1427,8 +1431,10 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
err = finish_open(file, dentry, do_open, opened);
if (err)
goto out;
- nfs_file_set_open_context(file, ctx);
-
+ if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ nfs_file_set_open_context(file, ctx);
+ else
+ err = -ESTALE;
out:
return err;
}
@@ -1512,7 +1518,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
d_drop(dentry);
switch (err) {
case -ENOENT:
- d_add(dentry, NULL);
+ d_splice_alias(NULL, dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
break;
case -EISDIR:
@@ -2035,7 +2041,11 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
error = rpc_wait_for_completion_task(task);
- if (error == 0)
+ if (error != 0) {
+ ((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1;
+ /* Paired with the atomic_dec_and_test() barrier in rpc_do_put_task() */
+ smp_wmb();
+ } else
error = task->tk_status;
rpc_put_task(task);
nfs_mark_for_revalidate(old_inode);
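
Beyond the bool conversions, the dir.c hunks tighten error handling: nfs_lookup_revalidate() now distinguishes a stale inode from a real failure instead of treating every nonzero return as "redo the lookup". A condensed model of the nfs_lookup_verify_inode() triage (hypothetical, not the kernel function):

#include <errno.h>
#include <stdio.h>

#define LOOKUP_RCU 0x40	/* assumed flag value, for the sketch only */

static int revalidate(int verify_error, unsigned int flags)
{
	if (verify_error) {
		if (flags & LOOKUP_RCU)
			return -ECHILD;		/* cannot block: retry in ref-walk */
		if (verify_error == -ESTALE)
			return 0;		/* invalid: force a fresh lookup */
		return verify_error;		/* real error: propagate it */
	}
	return 1;				/* dentry still valid */
}

int main(void)
{
	printf("%d %d %d\n",
	       revalidate(-ESTALE, LOOKUP_RCU),	/* -ECHILD */
	       revalidate(-ESTALE, 0),		/* 0 */
	       revalidate(-EIO, 0));		/* -EIO */
	return 0;
}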
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
new file mode 100644
index 000000000000..249cb96cc5b5
--- /dev/null
+++ b/fs/nfs/export.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+#include <linux/dcache.h>
+#include <linux/exportfs.h>
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+
+#include "internal.h"
+#include "nfstrace.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+enum {
+ FILEID_HIGH_OFF = 0, /* inode fileid high */
+ FILEID_LOW_OFF, /* inode fileid low */
+ FILE_I_TYPE_OFF, /* inode type */
+ EMBED_FH_OFF /* embedded server fh */
+};
+
+
+static struct nfs_fh *nfs_exp_embedfh(__u32 *p)
+{
+ return (struct nfs_fh *)(p + EMBED_FH_OFF);
+}
+
+/*
+ * Let's break subtree checking for now... otherwise we'll have to embed parent fh
+ * but there might not be enough space.
+ */
+static int
+nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
+{
+ struct nfs_fh *server_fh = NFS_FH(inode);
+ struct nfs_fh *clnt_fh = nfs_exp_embedfh(p);
+ size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
+ int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
+
+ dprintk("%s: max fh len %d inode %p parent %p",
+ __func__, *max_len, inode, parent);
+
+ if (*max_len < len || IS_AUTOMOUNT(inode)) {
+ dprintk("%s: fh len %d too small, required %d\n",
+ __func__, *max_len, len);
+ *max_len = len;
+ return FILEID_INVALID;
+ }
+ if (IS_AUTOMOUNT(inode)) {
+ *max_len = FILEID_INVALID;
+ goto out;
+ }
+
+ p[FILEID_HIGH_OFF] = NFS_FILEID(inode) >> 32;
+ p[FILEID_LOW_OFF] = NFS_FILEID(inode);
+ p[FILE_I_TYPE_OFF] = inode->i_mode & S_IFMT;
+ p[len - 1] = 0; /* Padding */
+ nfs_copy_fh(clnt_fh, server_fh);
+ *max_len = len;
+out:
+ dprintk("%s: result fh fileid %llu mode %u size %d\n",
+ __func__, NFS_FILEID(inode), inode->i_mode, *max_len);
+ return *max_len;
+}
+
+static struct dentry *
+nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ struct nfs4_label *label = NULL;
+ struct nfs_fattr *fattr = NULL;
+ struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
+ size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
+ const struct nfs_rpc_ops *rpc_ops;
+ struct dentry *dentry;
+ struct inode *inode;
+ int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
+ u32 *p = fid->raw;
+ int ret;
+
+ /* NULL translates to ESTALE */
+ if (fh_len < len || fh_type != len)
+ return NULL;
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL) {
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ fattr->fileid = ((u64)p[FILEID_HIGH_OFF] << 32) + p[FILEID_LOW_OFF];
+ fattr->mode = p[FILE_I_TYPE_OFF];
+ fattr->valid |= NFS_ATTR_FATTR_FILEID | NFS_ATTR_FATTR_TYPE;
+
+ dprintk("%s: fileid %llu mode %d\n", __func__, fattr->fileid, fattr->mode);
+
+ inode = nfs_ilookup(sb, fattr, server_fh);
+ if (inode)
+ goto out_found;
+
+ label = nfs4_label_alloc(NFS_SB(sb), GFP_KERNEL);
+ if (IS_ERR(label)) {
+ dentry = ERR_CAST(label);
+ goto out_free_fattr;
+ }
+
+ rpc_ops = NFS_SB(sb)->nfs_client->rpc_ops;
+ ret = rpc_ops->getattr(NFS_SB(sb), server_fh, fattr, label);
+ if (ret) {
+ dprintk("%s: getattr failed %d\n", __func__, ret);
+ dentry = ERR_PTR(ret);
+ goto out_free_label;
+ }
+
+ inode = nfs_fhget(sb, server_fh, fattr, label);
+
+out_found:
+ dentry = d_obtain_alias(inode);
+
+out_free_label:
+ nfs4_label_free(label);
+out_free_fattr:
+ nfs_free_fattr(fattr);
+out:
+ return dentry;
+}
+
+static struct dentry *
+nfs_get_parent(struct dentry *dentry)
+{
+ int ret;
+ struct inode *inode = d_inode(dentry), *pinode;
+ struct super_block *sb = inode->i_sb;
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_fattr *fattr = NULL;
+ struct nfs4_label *label = NULL;
+ struct dentry *parent;
+	const struct nfs_rpc_ops *ops = server->nfs_client->rpc_ops;
+ struct nfs_fh fh;
+
+ if (!ops->lookupp)
+ return ERR_PTR(-EACCES);
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL) {
+ parent = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ label = nfs4_label_alloc(server, GFP_KERNEL);
+ if (IS_ERR(label)) {
+ parent = ERR_CAST(label);
+ goto out_free_fattr;
+ }
+
+ ret = ops->lookupp(inode, &fh, fattr, label);
+ if (ret) {
+ parent = ERR_PTR(ret);
+ goto out_free_label;
+ }
+
+ pinode = nfs_fhget(sb, &fh, fattr, label);
+ parent = d_obtain_alias(pinode);
+out_free_label:
+ nfs4_label_free(label);
+out_free_fattr:
+ nfs_free_fattr(fattr);
+out:
+ return parent;
+}
+
+const struct export_operations nfs_export_ops = {
+ .encode_fh = nfs_encode_fh,
+ .fh_to_dentry = nfs_fh_to_dentry,
+ .get_parent = nfs_get_parent,
+};
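
These three operations are the minimum exportfs needs for open-by-handle and knfsd re-export. A sketch of how a superblock opts in (s_export_op is the standard VFS field; the fill_super name here is hypothetical):

    #include <linux/exportfs.h>

    static int demo_fill_super(struct super_block *sb, void *data, int silent)
    {
    	/* advertise encode_fh/fh_to_dentry/get_parent to exportfs */
    	sb->s_export_op = &nfs_export_ops;
    	return 0;
    }
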
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 1cf85d65b748..080fc6b278bd 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -126,32 +126,13 @@ static int filelayout_async_handle_error(struct rpc_task *task,
{
struct pnfs_layout_hdr *lo = lseg->pls_layout;
struct inode *inode = lo->plh_inode;
- struct nfs_server *mds_server = NFS_SERVER(inode);
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
- struct nfs_client *mds_client = mds_server->nfs_client;
struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
if (task->tk_status >= 0)
return 0;
switch (task->tk_status) {
- /* MDS state errors */
- case -NFS4ERR_DELEG_REVOKED:
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_OPENMODE:
- if (state == NULL)
- break;
- if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
- goto out_bad_stateid;
- goto wait_on_recovery;
- case -NFS4ERR_EXPIRED:
- if (state != NULL) {
- if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
- goto out_bad_stateid;
- }
- nfs4_schedule_lease_recovery(mds_client);
- goto wait_on_recovery;
/* DS session errors */
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -172,6 +153,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
/* Invalidate Layout errors */
+ case -NFS4ERR_ACCESS:
case -NFS4ERR_PNFS_NO_LAYOUT:
case -ESTALE: /* mapped NFS4ERR_STALE */
case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
@@ -202,26 +184,17 @@ static int filelayout_async_handle_error(struct rpc_task *task,
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
pnfs_error_mark_layout_for_return(inode, lseg);
+ pnfs_set_lo_fail(lseg);
rpc_wake_up(&tbl->slot_tbl_waitq);
/* fall through */
default:
- pnfs_set_lo_fail(lseg);
reset:
dprintk("%s Retry through MDS. Error %d\n", __func__,
task->tk_status);
return -NFS4ERR_RESET_TO_MDS;
}
-out:
task->tk_status = 0;
return -EAGAIN;
-out_bad_stateid:
- task->tk_status = -EIO;
- return 0;
-wait_on_recovery:
- rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
- if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
- rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
- goto out;
}
/* NFS_PROTO call done callback routines */
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 23542dc44a25..1f2ac3dd0fe5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1050,34 +1050,10 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
{
struct pnfs_layout_hdr *lo = lseg->pls_layout;
struct inode *inode = lo->plh_inode;
- struct nfs_server *mds_server = NFS_SERVER(inode);
-
struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
- struct nfs_client *mds_client = mds_server->nfs_client;
struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
switch (task->tk_status) {
- /* MDS state errors */
- case -NFS4ERR_DELEG_REVOKED:
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_BAD_STATEID:
- if (state == NULL)
- break;
- nfs_remove_bad_delegation(state->inode, NULL);
- case -NFS4ERR_OPENMODE:
- if (state == NULL)
- break;
- if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
- goto out_bad_stateid;
- goto wait_on_recovery;
- case -NFS4ERR_EXPIRED:
- if (state != NULL) {
- if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
- goto out_bad_stateid;
- }
- nfs4_schedule_lease_recovery(mds_client);
- goto wait_on_recovery;
- /* DS session errors */
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
@@ -1137,17 +1113,8 @@ reset:
task->tk_status);
return -NFS4ERR_RESET_TO_MDS;
}
-out:
task->tk_status = 0;
return -EAGAIN;
-out_bad_stateid:
- task->tk_status = -EIO;
- return 0;
-wait_on_recovery:
- rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
- if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
- rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
- goto out;
}
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1de93ba78dc9..109279d6d91b 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -386,6 +386,28 @@ void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
#endif
EXPORT_SYMBOL_GPL(nfs_setsecurity);
+/* Search the inode cache for the inode identified by fh, fileid and i_mode. */
+struct inode *
+nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
+{
+ struct nfs_find_desc desc = {
+ .fh = fh,
+ .fattr = fattr,
+ };
+ struct inode *inode;
+ unsigned long hash;
+
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID) ||
+ !(fattr->valid & NFS_ATTR_FATTR_TYPE))
+ return NULL;
+
+ hash = nfs_fattr_to_ino_t(fattr);
+ inode = ilookup5(sb, hash, nfs_find_actor, &desc);
+
+ dprintk("%s: returning %p\n", __func__, inode);
+ return inode;
+}
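
nfs_ilookup() is a cache-only probe: it returns a referenced inode on a hit and NULL on a miss, and it requires both the FILEID and TYPE attributes to be valid because nfs_find_actor() compares fileid, file handle and type. A hypothetical caller sketch:

    static struct inode *demo_lookup_cached(struct super_block *sb,
    					struct nfs_fh *fh,
    					struct nfs_fattr *fattr)
    {
    	struct inode *inode = nfs_ilookup(sb, fattr, fh);

    	if (inode)
    		return inode;	/* hit: reference held, release with iput() */
    	return NULL;		/* miss: caller does GETATTR + nfs_fhget() */
    }
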
+
/*
* This is our front-end to iget that looks up inodes by file handle
* instead of inode number.
@@ -525,8 +547,14 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
nfs_fscache_init_inode(inode);
unlock_new_inode(inode);
- } else
- nfs_refresh_inode(inode, fattr);
+ } else {
+ int err = nfs_refresh_inode(inode, fattr);
+ if (err < 0) {
+ iput(inode);
+ inode = ERR_PTR(err);
+ goto out_no_inode;
+ }
+ }
dprintk("NFS: nfs_fhget(%s/%Lu fh_crc=0x%08x ct=%d)\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode),
@@ -1315,9 +1343,9 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
- return -EIO;
+ return -ESTALE;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
- return -EIO;
+ return -ESTALE;
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
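
Returning -ESTALE instead of -EIO matters for the export path: nfs_fhget() now propagates a refresh failure, and ESTALE is the error open-by-handle callers conventionally treat as "this handle no longer names the same object". A sketch of the resulting error flow:

    /*
     *   nfs_fh_to_dentry()
     *     -> nfs_fhget()
     *          -> nfs_refresh_inode() -> nfs_check_inode_attributes()
     *               fileid or type changed  =>  -ESTALE
     *     <- ERR_PTR(-ESTALE)  (instead of silently reusing a stale inode)
     */
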
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8701d7617964..dc456416d2be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -11,6 +11,8 @@
#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+extern const struct export_operations nfs_export_ops;
+
struct nfs_string;
/* Maximum number of readahead requests
@@ -226,8 +228,8 @@ static inline void nfs_fs_proc_exit(void)
#endif
/* callback_xdr.c */
-extern struct svc_version nfs4_callback_version1;
-extern struct svc_version nfs4_callback_version4;
+extern const struct svc_version nfs4_callback_version1;
+extern const struct svc_version nfs4_callback_version4;
struct nfs_pageio_descriptor;
/* pagelist.c */
@@ -271,19 +273,19 @@ static inline bool nfs_match_open_context(const struct nfs_open_context *ctx1,
}
/* nfs2xdr.c */
-extern struct rpc_procinfo nfs_procedures[];
+extern const struct rpc_procinfo nfs_procedures[];
extern int nfs2_decode_dirent(struct xdr_stream *,
- struct nfs_entry *, int);
+ struct nfs_entry *, bool);
/* nfs3xdr.c */
-extern struct rpc_procinfo nfs3_procedures[];
+extern const struct rpc_procinfo nfs3_procedures[];
extern int nfs3_decode_dirent(struct xdr_stream *,
- struct nfs_entry *, int);
+ struct nfs_entry *, bool);
/* nfs4xdr.c */
#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs4_decode_dirent(struct xdr_stream *,
- struct nfs_entry *, int);
+ struct nfs_entry *, bool);
#endif
#ifdef CONFIG_NFS_V4_1
extern const u32 nfs41_maxread_overhead;
@@ -293,7 +295,7 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
/* nfs4proc.c */
#if IS_ENABLED(CONFIG_NFS_V4)
-extern struct rpc_procinfo nfs4_procedures[];
+extern const struct rpc_procinfo nfs4_procedures[];
#endif
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 09b190015df4..3efe946672be 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -304,7 +304,7 @@ static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
}
static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
- const char *dirpath)
+ const void *dirpath)
{
encode_mntdirpath(xdr, dirpath);
}
@@ -357,8 +357,9 @@ static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct mountres *res)
+ void *data)
{
+ struct mountres *res = data;
int status;
status = decode_status(xdr, res);
@@ -449,8 +450,9 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct mountres *res)
+ void *data)
{
+ struct mountres *res = data;
int status;
status = decode_fhs_status(xdr, res);
@@ -464,11 +466,11 @@ static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
return decode_auth_flavors(xdr, res);
}
-static struct rpc_procinfo mnt_procedures[] = {
+static const struct rpc_procinfo mnt_procedures[] = {
[MOUNTPROC_MNT] = {
.p_proc = MOUNTPROC_MNT,
- .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
- .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres,
+ .p_encode = mnt_xdr_enc_dirpath,
+ .p_decode = mnt_xdr_dec_mountres,
.p_arglen = MNT_enc_dirpath_sz,
.p_replen = MNT_dec_mountres_sz,
.p_statidx = MOUNTPROC_MNT,
@@ -476,18 +478,18 @@ static struct rpc_procinfo mnt_procedures[] = {
},
[MOUNTPROC_UMNT] = {
.p_proc = MOUNTPROC_UMNT,
- .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_encode = mnt_xdr_enc_dirpath,
.p_arglen = MNT_enc_dirpath_sz,
.p_statidx = MOUNTPROC_UMNT,
.p_name = "UMOUNT",
},
};
-static struct rpc_procinfo mnt3_procedures[] = {
+static const struct rpc_procinfo mnt3_procedures[] = {
[MOUNTPROC3_MNT] = {
.p_proc = MOUNTPROC3_MNT,
- .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
- .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres3,
+ .p_encode = mnt_xdr_enc_dirpath,
+ .p_decode = mnt_xdr_dec_mountres3,
.p_arglen = MNT_enc_dirpath_sz,
.p_replen = MNT_dec_mountres3_sz,
.p_statidx = MOUNTPROC3_MNT,
@@ -495,24 +497,27 @@ static struct rpc_procinfo mnt3_procedures[] = {
},
[MOUNTPROC3_UMNT] = {
.p_proc = MOUNTPROC3_UMNT,
- .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_encode = mnt_xdr_enc_dirpath,
.p_arglen = MNT_enc_dirpath_sz,
.p_statidx = MOUNTPROC3_UMNT,
.p_name = "UMOUNT",
},
};
-
+static unsigned int mnt_counts[ARRAY_SIZE(mnt_procedures)];
static const struct rpc_version mnt_version1 = {
.number = 1,
.nrprocs = ARRAY_SIZE(mnt_procedures),
.procs = mnt_procedures,
+ .counts = mnt_counts,
};
+static unsigned int mnt3_counts[ARRAY_SIZE(mnt3_procedures)];
static const struct rpc_version mnt_version3 = {
.number = 3,
.nrprocs = ARRAY_SIZE(mnt3_procedures),
.procs = mnt3_procedures,
+ .counts = mnt3_counts,
};
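
The counts arrays exist because p_count moves out of struct rpc_procinfo in this series: the procedure tables can then live in read-only memory while the per-procedure call statistics stay writable. The pattern, with hypothetical names:

    static const struct rpc_procinfo demo_procedures[] = {
    	/* ... per-procedure encode/decode hooks ... */
    };
    /* mutable side table, one counter per procedure */
    static unsigned int demo_counts[ARRAY_SIZE(demo_procedures)];

    static const struct rpc_version demo_version = {
    	.number		= 1,
    	.nrprocs	= ARRAY_SIZE(demo_procedures),
    	.procs		= demo_procedures,
    	.counts		= demo_counts,	/* bumped by the RPC core per call */
    };
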
static const struct rpc_version *mnt_version[] = {
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index b4e03ed8599d..fe68dabfbde6 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -568,8 +568,10 @@ out_default:
static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_fh *fh)
+ const void *data)
{
+ const struct nfs_fh *fh = data;
+
encode_fhandle(xdr, fh);
}
@@ -583,23 +585,29 @@ static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
*/
static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_sattrargs *args)
+ const void *data)
{
+ const struct nfs_sattrargs *args = data;
+
encode_fhandle(xdr, args->fh);
encode_sattr(xdr, args->sattr);
}
static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_diropargs *args)
+ const void *data)
{
+ const struct nfs_diropargs *args = data;
+
encode_diropargs(xdr, args->fh, args->name, args->len);
}
static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_readlinkargs *args)
+ const void *data)
{
+ const struct nfs_readlinkargs *args = data;
+
encode_fhandle(xdr, args->fh);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->pglen, NFS_readlinkres_sz);
@@ -632,8 +640,10 @@ static void encode_readargs(struct xdr_stream *xdr,
static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_pgio_args *args)
+ const void *data)
{
+ const struct nfs_pgio_args *args = data;
+
encode_readargs(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->count, NFS_readres_sz);
@@ -672,8 +682,10 @@ static void encode_writeargs(struct xdr_stream *xdr,
static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_pgio_args *args)
+ const void *data)
{
+ const struct nfs_pgio_args *args = data;
+
encode_writeargs(xdr, args);
xdr->buf->flags |= XDRBUF_WRITE;
}
@@ -688,16 +700,20 @@ static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
*/
static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_createargs *args)
+ const void *data)
{
+ const struct nfs_createargs *args = data;
+
encode_diropargs(xdr, args->fh, args->name, args->len);
encode_sattr(xdr, args->sattr);
}
static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_removeargs *args)
+ const void *data)
{
+ const struct nfs_removeargs *args = data;
+
encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
}
@@ -711,8 +727,9 @@ static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
*/
static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_renameargs *args)
+ const void *data)
{
+ const struct nfs_renameargs *args = data;
const struct qstr *old = args->old_name;
const struct qstr *new = args->new_name;
@@ -730,8 +747,10 @@ static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
*/
static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_linkargs *args)
+ const void *data)
{
+ const struct nfs_linkargs *args = data;
+
encode_fhandle(xdr, args->fromfh);
encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
}
@@ -747,8 +766,10 @@ static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
*/
static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_symlinkargs *args)
+ const void *data)
{
+ const struct nfs_symlinkargs *args = data;
+
encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
encode_path(xdr, args->pages, args->pathlen);
encode_sattr(xdr, args->sattr);
@@ -777,8 +798,10 @@ static void encode_readdirargs(struct xdr_stream *xdr,
static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_readdirargs *args)
+ const void *data)
{
+ const struct nfs_readdirargs *args = data;
+
encode_readdirargs(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS_readdirres_sz);
@@ -809,13 +832,13 @@ out_default:
}
static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_fattr *result)
+ void *result)
{
return decode_attrstat(xdr, result, NULL);
}
static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_diropok *result)
+ void *result)
{
return decode_diropres(xdr, result);
}
@@ -860,8 +883,9 @@ out_default:
* };
*/
static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_pgio_res *result)
+ void *data)
{
+ struct nfs_pgio_res *result = data;
enum nfs_stat status;
int error;
@@ -882,8 +906,10 @@ out_default:
}
static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_pgio_res *result)
+ void *data)
{
+ struct nfs_pgio_res *result = data;
+
/* All NFSv2 writes are "file sync" writes */
result->verf->committed = NFS_FILE_SYNC;
return decode_attrstat(xdr, result->fattr, &result->op_status);
@@ -913,7 +939,7 @@ static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
* };
*/
int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
- int plus)
+ bool plus)
{
__be32 *p;
int error;
@@ -1034,7 +1060,7 @@ out_overflow:
}
static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs2_fsstat *result)
+ void *result)
{
enum nfs_stat status;
int error;
@@ -1118,15 +1144,15 @@ static int nfs_stat_to_errno(enum nfs_stat status)
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
- .p_encode = (kxdreproc_t)nfs2_xdr_enc_##argtype, \
- .p_decode = (kxdrdproc_t)nfs2_xdr_dec_##restype, \
+ .p_encode = nfs2_xdr_enc_##argtype, \
+ .p_decode = nfs2_xdr_dec_##restype, \
.p_arglen = NFS_##argtype##_sz, \
.p_replen = NFS_##restype##_sz, \
.p_timer = timer, \
.p_statidx = NFSPROC_##proc, \
.p_name = #proc, \
}
-struct rpc_procinfo nfs_procedures[] = {
+const struct rpc_procinfo nfs_procedures[] = {
PROC(GETATTR, fhandle, attrstat, 1),
PROC(SETATTR, sattrargs, attrstat, 0),
PROC(LOOKUP, diropargs, diropres, 2),
@@ -1144,8 +1170,10 @@ struct rpc_procinfo nfs_procedures[] = {
PROC(STATFS, fhandle, statfsres, 0),
};
+static unsigned int nfs_version2_counts[ARRAY_SIZE(nfs_procedures)];
const struct rpc_version nfs_version2 = {
.number = 2,
.nrprocs = ARRAY_SIZE(nfs_procedures),
- .procs = nfs_procedures
+ .procs = nfs_procedures,
+ .counts = nfs_version2_counts,
};
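
Dropping the (kxdreproc_t)/(kxdrdproc_t) casts is the point of the const void */void * signature churn in this file: with a common prototype the compiler can type-check the .p_encode/.p_decode assignments, and each handler recovers its real type with one local assignment. A sketch of the before/after, using a hypothetical encoder name:

    /* before: per-type prototype, cast erases type checking
     *	.p_encode = (kxdreproc_t)demo_enc,
     */
    static void demo_enc(struct rpc_rqst *req, struct xdr_stream *xdr,
    		     const void *data)
    {
    	const struct nfs_fh *fh = data;	/* typed recovery of the argument */

    	encode_fhandle(xdr, fh);
    }
    /* after: direct, type-checked assignment
     *	.p_encode = demo_enc,
     */
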
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 0c07b567118d..df4a7d3ab915 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -621,7 +621,7 @@ out:
*/
static int
nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page **pages, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
__be32 *verf = NFS_I(dir)->cookieverf;
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 267126d32ec0..e82c9e553224 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -846,8 +846,10 @@ static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
*/
static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_fh *fh)
+ const void *data)
{
+ const struct nfs_fh *fh = data;
+
encode_nfs_fh3(xdr, fh);
}
@@ -884,8 +886,9 @@ static void encode_sattrguard3(struct xdr_stream *xdr,
static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_sattrargs *args)
+ const void *data)
{
+	const struct nfs3_sattrargs *args = data;
+
encode_nfs_fh3(xdr, args->fh);
encode_sattr3(xdr, args->sattr);
encode_sattrguard3(xdr, args);
@@ -900,8 +903,10 @@ static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
*/
static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_diropargs *args)
+ const void *data)
{
+ const struct nfs3_diropargs *args = data;
+
encode_diropargs3(xdr, args->fh, args->name, args->len);
}
@@ -922,8 +927,10 @@ static void encode_access3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_accessargs *args)
+ const void *data)
{
+ const struct nfs3_accessargs *args = data;
+
encode_access3args(xdr, args);
}
@@ -936,8 +943,10 @@ static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
*/
static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_readlinkargs *args)
+ const void *data)
{
+ const struct nfs3_readlinkargs *args = data;
+
encode_nfs_fh3(xdr, args->fh);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->pglen, NFS3_readlinkres_sz);
@@ -966,8 +975,10 @@ static void encode_read3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_pgio_args *args)
+ const void *data)
{
+ const struct nfs_pgio_args *args = data;
+
encode_read3args(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->count, NFS3_readres_sz);
@@ -1008,8 +1019,10 @@ static void encode_write3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_pgio_args *args)
+ const void *data)
{
+ const struct nfs_pgio_args *args = data;
+
encode_write3args(xdr, args);
xdr->buf->flags |= XDRBUF_WRITE;
}
@@ -1055,8 +1068,10 @@ static void encode_createhow3(struct xdr_stream *xdr,
static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_createargs *args)
+ const void *data)
{
+ const struct nfs3_createargs *args = data;
+
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_createhow3(xdr, args);
}
@@ -1071,8 +1086,10 @@ static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
*/
static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_mkdirargs *args)
+ const void *data)
{
+ const struct nfs3_mkdirargs *args = data;
+
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_sattr3(xdr, args->sattr);
}
@@ -1091,16 +1108,20 @@ static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
* };
*/
static void encode_symlinkdata3(struct xdr_stream *xdr,
- const struct nfs3_symlinkargs *args)
+ const void *data)
{
+ const struct nfs3_symlinkargs *args = data;
+
encode_sattr3(xdr, args->sattr);
encode_nfspath3(xdr, args->pages, args->pathlen);
}
static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_symlinkargs *args)
+ const void *data)
{
+ const struct nfs3_symlinkargs *args = data;
+
encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
encode_symlinkdata3(xdr, args);
xdr->buf->flags |= XDRBUF_WRITE;
@@ -1160,8 +1181,10 @@ static void encode_mknoddata3(struct xdr_stream *xdr,
static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_mknodargs *args)
+ const void *data)
{
+ const struct nfs3_mknodargs *args = data;
+
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_mknoddata3(xdr, args);
}
@@ -1175,8 +1198,10 @@ static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
*/
static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_removeargs *args)
+ const void *data)
{
+ const struct nfs_removeargs *args = data;
+
encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
}
@@ -1190,8 +1215,9 @@ static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
*/
static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_renameargs *args)
+ const void *data)
{
+ const struct nfs_renameargs *args = data;
const struct qstr *old = args->old_name;
const struct qstr *new = args->new_name;
@@ -1209,8 +1235,10 @@ static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
*/
static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_linkargs *args)
+ const void *data)
{
+ const struct nfs3_linkargs *args = data;
+
encode_nfs_fh3(xdr, args->fromfh);
encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
}
@@ -1240,8 +1268,10 @@ static void encode_readdir3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_readdirargs *args)
+ const void *data)
{
+ const struct nfs3_readdirargs *args = data;
+
encode_readdir3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
@@ -1280,8 +1310,10 @@ static void encode_readdirplus3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_readdirargs *args)
+ const void *data)
{
+ const struct nfs3_readdirargs *args = data;
+
encode_readdirplus3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
@@ -1310,8 +1342,10 @@ static void encode_commit3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_commitargs *args)
+ const void *data)
{
+ const struct nfs_commitargs *args = data;
+
encode_commit3args(xdr, args);
}
@@ -1319,8 +1353,10 @@ static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_getaclargs *args)
+ const void *data)
{
+ const struct nfs3_getaclargs *args = data;
+
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->mask);
if (args->mask & (NFS_ACL | NFS_DFACL))
@@ -1331,8 +1367,9 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs3_setaclargs *args)
+ const void *data)
{
+ const struct nfs3_setaclargs *args = data;
unsigned int base;
int error;
@@ -1382,7 +1419,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
*/
static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_fattr *result)
+ void *result)
{
enum nfs_stat status;
int error;
@@ -1419,7 +1456,7 @@ out_default:
*/
static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_fattr *result)
+ void *result)
{
enum nfs_stat status;
int error;
@@ -1460,8 +1497,9 @@ out_status:
*/
static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs3_diropres *result)
+ void *data)
{
+ struct nfs3_diropres *result = data;
enum nfs_stat status;
int error;
@@ -1507,8 +1545,9 @@ out_default:
*/
static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs3_accessres *result)
+ void *data)
{
+ struct nfs3_accessres *result = data;
enum nfs_stat status;
int error;
@@ -1548,7 +1587,7 @@ out_default:
*/
static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_fattr *result)
+ void *result)
{
enum nfs_stat status;
int error;
@@ -1626,8 +1665,9 @@ out_overflow:
}
static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_pgio_res *result)
+ void *data)
{
+ struct nfs_pgio_res *result = data;
enum nfs_stat status;
int error;
@@ -1699,8 +1739,9 @@ out_eio:
}
static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_pgio_res *result)
+ void *data)
{
+ struct nfs_pgio_res *result = data;
enum nfs_stat status;
int error;
@@ -1764,8 +1805,9 @@ out:
static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs3_diropres *result)
+ void *data)
{
+ struct nfs3_diropres *result = data;
enum nfs_stat status;
int error;
@@ -1804,8 +1846,9 @@ out_default:
*/
static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_removeres *result)
+ void *data)
{
+ struct nfs_removeres *result = data;
enum nfs_stat status;
int error;
@@ -1845,8 +1888,9 @@ out_status:
*/
static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_renameres *result)
+ void *data)
{
+ struct nfs_renameres *result = data;
enum nfs_stat status;
int error;
@@ -1888,8 +1932,9 @@ out_status:
* };
*/
static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs3_linkres *result)
+ void *data)
{
+ struct nfs3_linkres *result = data;
enum nfs_stat status;
int error;
@@ -1946,7 +1991,7 @@ out_status:
* };
*/
int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
- int plus)
+ bool plus)
{
struct nfs_entry old = *entry;
__be32 *p;
@@ -2072,8 +2117,9 @@ out:
static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs3_readdirres *result)
+ void *data)
{
+ struct nfs3_readdirres *result = data;
enum nfs_stat status;
int error;
@@ -2140,8 +2186,9 @@ out_overflow:
static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_fsstat *result)
+ void *data)
{
+ struct nfs_fsstat *result = data;
enum nfs_stat status;
int error;
@@ -2216,8 +2263,9 @@ out_overflow:
static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_fsinfo *result)
+ void *data)
{
+ struct nfs_fsinfo *result = data;
enum nfs_stat status;
int error;
@@ -2279,8 +2327,9 @@ out_overflow:
static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_pathconf *result)
+ void *data)
{
+ struct nfs_pathconf *result = data;
enum nfs_stat status;
int error;
@@ -2320,8 +2369,9 @@ out_status:
*/
static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_commitres *result)
+ void *data)
{
+ struct nfs_commitres *result = data;
enum nfs_stat status;
int error;
@@ -2389,7 +2439,7 @@ out:
static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs3_getaclres *result)
+ void *result)
{
enum nfs_stat status;
int error;
@@ -2408,7 +2458,7 @@ out_default:
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_fattr *result)
+ void *result)
{
enum nfs_stat status;
int error;
@@ -2495,8 +2545,8 @@ static int nfs3_stat_to_errno(enum nfs_stat status)
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
- .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \
- .p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \
+ .p_encode = nfs3_xdr_enc_##argtype##3args, \
+ .p_decode = nfs3_xdr_dec_##restype##3res, \
.p_arglen = NFS3_##argtype##args_sz, \
.p_replen = NFS3_##restype##res_sz, \
.p_timer = timer, \
@@ -2504,7 +2554,7 @@ static int nfs3_stat_to_errno(enum nfs_stat status)
.p_name = #proc, \
}
-struct rpc_procinfo nfs3_procedures[] = {
+const struct rpc_procinfo nfs3_procedures[] = {
PROC(GETATTR, getattr, getattr, 1),
PROC(SETATTR, setattr, setattr, 0),
PROC(LOOKUP, lookup, lookup, 2),
@@ -2528,18 +2578,20 @@ struct rpc_procinfo nfs3_procedures[] = {
PROC(COMMIT, commit, commit, 5),
};
+static unsigned int nfs_version3_counts[ARRAY_SIZE(nfs3_procedures)];
const struct rpc_version nfs_version3 = {
.number = 3,
.nrprocs = ARRAY_SIZE(nfs3_procedures),
- .procs = nfs3_procedures
+ .procs = nfs3_procedures,
+ .counts = nfs_version3_counts,
};
#ifdef CONFIG_NFS_V3_ACL
-static struct rpc_procinfo nfs3_acl_procedures[] = {
+static const struct rpc_procinfo nfs3_acl_procedures[] = {
[ACLPROC3_GETACL] = {
.p_proc = ACLPROC3_GETACL,
- .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
- .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
+ .p_encode = nfs3_xdr_enc_getacl3args,
+ .p_decode = nfs3_xdr_dec_getacl3res,
.p_arglen = ACL3_getaclargs_sz,
.p_replen = ACL3_getaclres_sz,
.p_timer = 1,
@@ -2547,8 +2599,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
},
[ACLPROC3_SETACL] = {
.p_proc = ACLPROC3_SETACL,
- .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
- .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
+ .p_encode = nfs3_xdr_enc_setacl3args,
+ .p_decode = nfs3_xdr_dec_setacl3res,
.p_arglen = ACL3_setaclargs_sz,
.p_replen = ACL3_setaclres_sz,
.p_timer = 0,
@@ -2556,10 +2608,11 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
},
};
+static unsigned int nfs3_acl_counts[ARRAY_SIZE(nfs3_acl_procedures)];
const struct rpc_version nfsacl_version3 = {
.number = 3,
- .nrprocs = sizeof(nfs3_acl_procedures)/
- sizeof(nfs3_acl_procedures[0]),
+ .nrprocs = ARRAY_SIZE(nfs3_acl_procedures),
.procs = nfs3_acl_procedures,
+ .counts = nfs3_acl_counts,
};
#endif /* CONFIG_NFS_V3_ACL */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 319a47db218d..6c2db51e67a7 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -146,7 +146,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
loff_t pos_src = args->src_pos;
loff_t pos_dst = args->dst_pos;
size_t count = args->count;
- int status;
+ ssize_t status;
status = nfs4_set_rw_stateid(&args->src_stateid, src_lock->open_context,
src_lock, FMODE_READ);
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 528362f69cc1..5ee1b0f0d904 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -112,7 +112,7 @@
decode_getattr_maxsz)
static void encode_fallocate(struct xdr_stream *xdr,
- struct nfs42_falloc_args *args)
+ const struct nfs42_falloc_args *args)
{
encode_nfs4_stateid(xdr, &args->falloc_stateid);
encode_uint64(xdr, args->falloc_offset);
@@ -120,7 +120,7 @@ static void encode_fallocate(struct xdr_stream *xdr,
}
static void encode_allocate(struct xdr_stream *xdr,
- struct nfs42_falloc_args *args,
+ const struct nfs42_falloc_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_ALLOCATE, decode_allocate_maxsz, hdr);
@@ -128,7 +128,7 @@ static void encode_allocate(struct xdr_stream *xdr,
}
static void encode_copy(struct xdr_stream *xdr,
- struct nfs42_copy_args *args,
+ const struct nfs42_copy_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_COPY, decode_copy_maxsz, hdr);
@@ -145,7 +145,7 @@ static void encode_copy(struct xdr_stream *xdr,
}
static void encode_deallocate(struct xdr_stream *xdr,
- struct nfs42_falloc_args *args,
+ const struct nfs42_falloc_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_DEALLOCATE, decode_deallocate_maxsz, hdr);
@@ -153,7 +153,7 @@ static void encode_deallocate(struct xdr_stream *xdr,
}
static void encode_seek(struct xdr_stream *xdr,
- struct nfs42_seek_args *args,
+ const struct nfs42_seek_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_SEEK, decode_seek_maxsz, hdr);
@@ -163,7 +163,7 @@ static void encode_seek(struct xdr_stream *xdr,
}
static void encode_layoutstats(struct xdr_stream *xdr,
- struct nfs42_layoutstat_args *args,
+ const struct nfs42_layoutstat_args *args,
struct nfs42_layoutstat_devinfo *devinfo,
struct compound_hdr *hdr)
{
@@ -191,7 +191,7 @@ static void encode_layoutstats(struct xdr_stream *xdr,
}
static void encode_clone(struct xdr_stream *xdr,
- struct nfs42_clone_args *args,
+ const struct nfs42_clone_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
@@ -210,8 +210,9 @@ static void encode_clone(struct xdr_stream *xdr,
*/
static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs42_falloc_args *args)
+ const void *data)
{
+ const struct nfs42_falloc_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -225,7 +226,7 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
}
static void encode_copy_commit(struct xdr_stream *xdr,
- struct nfs42_copy_args *args,
+ const struct nfs42_copy_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
@@ -241,8 +242,9 @@ static void encode_copy_commit(struct xdr_stream *xdr,
*/
static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs42_copy_args *args)
+ const void *data)
{
+ const struct nfs42_copy_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -262,8 +264,9 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs42_falloc_args *args)
+ const void *data)
{
+ const struct nfs42_falloc_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -281,8 +284,9 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs42_seek_args *args)
+ const void *data)
{
+ const struct nfs42_seek_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -299,8 +303,9 @@ static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs42_layoutstat_args *args)
+ const void *data)
{
+ const struct nfs42_layoutstat_args *args = data;
int i;
struct compound_hdr hdr = {
@@ -321,8 +326,9 @@ static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_clone(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs42_clone_args *args)
+ const void *data)
{
+ const struct nfs42_clone_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -448,8 +454,9 @@ static int decode_clone(struct xdr_stream *xdr)
*/
static int nfs4_xdr_dec_allocate(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs42_falloc_res *res)
+ void *data)
{
+ struct nfs42_falloc_res *res = data;
struct compound_hdr hdr;
int status;
@@ -475,8 +482,9 @@ out:
*/
static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs42_copy_res *res)
+ void *data)
{
+ struct nfs42_copy_res *res = data;
struct compound_hdr hdr;
int status;
@@ -508,8 +516,9 @@ out:
*/
static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs42_falloc_res *res)
+ void *data)
{
+ struct nfs42_falloc_res *res = data;
struct compound_hdr hdr;
int status;
@@ -535,8 +544,9 @@ out:
*/
static int nfs4_xdr_dec_seek(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs42_seek_res *res)
+ void *data)
{
+ struct nfs42_seek_res *res = data;
struct compound_hdr hdr;
int status;
@@ -559,8 +569,9 @@ out:
*/
static int nfs4_xdr_dec_layoutstats(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs42_layoutstat_res *res)
+ void *data)
{
+ struct nfs42_layoutstat_res *res = data;
struct compound_hdr hdr;
int status, i;
@@ -589,8 +600,9 @@ out:
*/
static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs42_clone_res *res)
+ void *data)
{
+ struct nfs42_clone_res *res = data;
struct compound_hdr hdr;
int status;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index af285cc27ccf..40bd05f05e74 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -493,13 +493,13 @@ static inline void nfs4_unregister_sysctl(void)
#endif
/* nfs4xdr.c */
-extern struct rpc_procinfo nfs4_procedures[];
+extern const struct rpc_procinfo nfs4_procedures[];
struct nfs4_mount_data;
/* callback_xdr.c */
-extern struct svc_version nfs4_callback_version1;
-extern struct svc_version nfs4_callback_version4;
+extern const struct svc_version nfs4_callback_version1;
+extern const struct svc_version nfs4_callback_version4;
static inline void nfs4_stateid_copy(nfs4_stateid *dst, const nfs4_stateid *src)
{
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 66776f022111..50566acb5469 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -414,6 +414,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
if (clp != old)
clp->cl_preserve_clid = true;
nfs_put_client(clp);
+ clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
return old;
error:
@@ -852,6 +853,8 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
if (server->options & NFS_OPTION_MIGRATION)
set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
+ if (test_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status))
+ set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
@@ -1212,9 +1215,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
return -EAFNOSUPPORT;
nfs_server_remove_lists(server);
+ set_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
error = nfs4_set_client(server, hostname, sap, salen, buf,
clp->cl_proto, clnt->cl_timeout,
clp->cl_minorversion, net);
+ clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
nfs_put_client(clp);
if (error != 0) {
nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 835c163f61af..dd5d27da8c0c 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -364,7 +364,8 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
ret = -EINVAL;
} else {
ret = kstrtol(id_str, 10, &id_long);
- *id = (__u32)id_long;
+ if (!ret)
+ *id = (__u32)id_long;
}
return ret;
}
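
The guard fixes a real bug: on a kstrtol() failure id_long is not guaranteed to be written, so the old code could copy an uninitialized value into *id while still returning the error. The idiom in isolation (demo names are hypothetical):

    #include <linux/kernel.h>

    static int demo_parse_id(const char *s, __u32 *id)
    {
    	long id_long;
    	int ret = kstrtol(s, 10, &id_long);

    	if (!ret)
    		*id = (__u32)id_long;	/* only consume on success */
    	return ret;
    }
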
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 98b0b662af09..a0b4e1091340 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -275,6 +275,7 @@ const u32 nfs4_fs_locations_bitmap[3] = {
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
struct nfs4_readdir_arg *readdir)
{
+ unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
__be32 *start, *p;
if (cookie > 2) {
@@ -305,8 +306,9 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
memcpy(p, ".\0\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
- *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
- *p++ = htonl(8); /* attribute buffer length */
+ *p++ = htonl(attrs); /* bitmap */
+ *p++ = htonl(12); /* attribute buffer length */
+ *p++ = htonl(NF4DIR);
p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
}
@@ -317,8 +319,9 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
memcpy(p, "..\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
- *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
- *p++ = htonl(8); /* attribute buffer length */
+ *p++ = htonl(attrs); /* bitmap */
+ *p++ = htonl(12); /* attribute buffer length */
+ *p++ = htonl(NF4DIR);
p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
readdir->pgbase = (char *)p - (char *)start;
@@ -1034,11 +1037,11 @@ struct nfs4_opendata {
struct nfs4_state *state;
struct iattr attrs;
unsigned long timestamp;
- unsigned int rpc_done : 1;
- unsigned int file_created : 1;
- unsigned int is_recover : 1;
+ bool rpc_done;
+ bool file_created;
+ bool is_recover;
+ bool cancelled;
int rpc_status;
- int cancelled;
};
static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
@@ -1962,7 +1965,7 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
nfs_confirm_seqid(&data->owner->so_seqid, 0);
renew_lease(data->o_res.server, data->timestamp);
- data->rpc_done = 1;
+ data->rpc_done = true;
}
}
@@ -1972,7 +1975,7 @@ static void nfs4_open_confirm_release(void *calldata)
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
- if (data->cancelled == 0)
+ if (!data->cancelled)
goto out_free;
/* In case of error, no cleanup! */
if (!data->rpc_done)
@@ -2015,7 +2018,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
kref_get(&data->kref);
- data->rpc_done = 0;
+ data->rpc_done = false;
data->rpc_status = 0;
data->timestamp = jiffies;
if (data->is_recover)
@@ -2025,7 +2028,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
return PTR_ERR(task);
status = rpc_wait_for_completion_task(task);
if (status != 0) {
- data->cancelled = 1;
+ data->cancelled = true;
smp_wmb();
} else
status = data->rpc_status;
@@ -2124,7 +2127,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
nfs_confirm_seqid(&data->owner->so_seqid, 0);
}
- data->rpc_done = 1;
+ data->rpc_done = true;
}
static void nfs4_open_release(void *calldata)
@@ -2133,7 +2136,7 @@ static void nfs4_open_release(void *calldata)
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
- if (data->cancelled == 0)
+ if (!data->cancelled)
goto out_free;
/* In case of error, no cleanup! */
if (data->rpc_status != 0 || !data->rpc_done)
@@ -2179,20 +2182,20 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
kref_get(&data->kref);
- data->rpc_done = 0;
+ data->rpc_done = false;
data->rpc_status = 0;
- data->cancelled = 0;
- data->is_recover = 0;
+ data->cancelled = false;
+ data->is_recover = false;
if (isrecover) {
nfs4_set_sequence_privileged(&o_arg->seq_args);
- data->is_recover = 1;
+ data->is_recover = true;
}
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = rpc_wait_for_completion_task(task);
if (status != 0) {
- data->cancelled = 1;
+ data->cancelled = true;
smp_wmb();
} else
status = data->rpc_status;
@@ -2287,9 +2290,9 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
if (o_arg->open_flags & O_CREAT) {
if (o_arg->open_flags & O_EXCL)
- data->file_created = 1;
+ data->file_created = true;
else if (o_res->cinfo.before != o_res->cinfo.after)
- data->file_created = 1;
+ data->file_created = true;
if (data->file_created || dir->i_version != o_res->cinfo.after)
update_changeattr(dir, &o_res->cinfo,
o_res->f_attr->time_start);
@@ -3803,6 +3806,54 @@ nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
}
+static int _nfs4_proc_lookupp(struct inode *inode,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr,
+ struct nfs4_label *label)
+{
+ struct rpc_clnt *clnt = NFS_CLIENT(inode);
+ struct nfs_server *server = NFS_SERVER(inode);
+ int status;
+ struct nfs4_lookupp_arg args = {
+ .bitmask = server->attr_bitmask,
+ .fh = NFS_FH(inode),
+ };
+ struct nfs4_lookupp_res res = {
+ .server = server,
+ .fattr = fattr,
+ .label = label,
+ .fh = fhandle,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ args.bitmask = nfs4_bitmask(server, label);
+
+ nfs_fattr_init(fattr);
+
+ dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
+ status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
+ &res.seq_res, 0);
+ dprintk("NFS reply lookupp: %d\n", status);
+ return status;
+}
+
+static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr, struct nfs4_label *label)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
+ trace_nfs4_lookupp(inode, err);
+ err = nfs4_handle_exception(NFS_SERVER(inode), err,
+ &exception);
+ } while (exception.retry);
+ return err;
+}
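
The two-level _nfs4_proc_lookupp()/nfs4_proc_lookupp() split follows the standard nfs4proc.c shape: the underscore version issues one COMPOUND, and the wrapper loops while nfs4_handle_exception() asks for a retry (after delays or state recovery). Condensed, with a hypothetical operation:

    static int demo_proc(struct inode *inode)
    {
    	struct nfs4_exception exception = { };
    	int err;

    	do {
    		err = _demo_proc(inode);	/* one COMPOUND round trip */
    		err = nfs4_handle_exception(NFS_SERVER(inode), err,
    					    &exception);
    	} while (exception.retry);		/* recoverable error: retry */
    	return err;
    }
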
+
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs_server *server = NFS_SERVER(inode);
@@ -4273,7 +4324,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
}
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page **pages, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
struct nfs4_readdir_arg args = {
@@ -4311,7 +4362,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
}
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page **pages, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct nfs4_exception exception = { };
int err;
@@ -6135,7 +6186,7 @@ static void nfs4_lock_release(void *calldata)
dprintk("%s: begin!\n", __func__);
nfs_free_seqid(data->arg.open_seqid);
- if (data->cancelled != 0) {
+ if (data->cancelled) {
struct rpc_task *task;
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
data->arg.lock_seqid);
@@ -6218,7 +6269,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
nfs4_handle_setlk_error(data->server, data->lsp,
data->arg.new_lock_owner, ret);
} else
- data->cancelled = 1;
+ data->cancelled = true;
rpc_put_task(task);
dprintk("%s: done, ret = %d!\n", __func__, ret);
trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
@@ -7376,12 +7427,11 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
if (status == 0) {
clp->cl_clientid = cdata->res.clientid;
clp->cl_exchange_flags = cdata->res.flags;
+ clp->cl_seqid = cdata->res.seqid;
/* Client ID is not confirmed */
- if (!(cdata->res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
+ if (!(cdata->res.flags & EXCHGID4_FLAG_CONFIRMED_R))
clear_bit(NFS4_SESSION_ESTABLISHED,
- &clp->cl_session->session_state);
- clp->cl_seqid = cdata->res.seqid;
- }
+ &clp->cl_session->session_state);
kfree(clp->cl_serverowner);
clp->cl_serverowner = cdata->res.server_owner;
@@ -9313,6 +9363,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
+ .lookupp = nfs4_proc_lookupp,
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,
.create = nfs4_proc_create,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cbf82b0d4467..0378e2257ca7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -352,11 +352,17 @@ int nfs41_discover_server_trunking(struct nfs_client *clp,
if (clp != *result)
return 0;
- /* Purge state if the client id was established in a prior instance */
- if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R)
- set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
- else
- set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ /*
+ * Purge state if the client id was established in a prior
+ * instance and the client id could not have arrived on the
+ * server via Transparent State Migration.
+ */
+ if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) {
+ if (!test_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags))
+ set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+ else
+ set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ }
nfs4_schedule_state_manager(clp);
status = nfs_wait_client_init_complete(clp);
if (status < 0)
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 845d0eadefc9..be1da19c65d6 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -891,6 +891,35 @@ DEFINE_NFS4_LOOKUP_EVENT(nfs4_remove);
DEFINE_NFS4_LOOKUP_EVENT(nfs4_get_fs_locations);
DEFINE_NFS4_LOOKUP_EVENT(nfs4_secinfo);
+TRACE_EVENT(nfs4_lookupp,
+ TP_PROTO(
+ const struct inode *inode,
+ int error
+ ),
+
+ TP_ARGS(inode, error),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = NFS_FILEID(inode);
+ __entry->error = error;
+ ),
+
+ TP_printk(
+ "error=%d (%s) inode=%02x:%02x:%llu",
+ __entry->error,
+ show_nfsv4_errors(__entry->error),
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->ino
+ )
+);
+
TRACE_EVENT(nfs4_rename,
TP_PROTO(
const struct inode *olddir,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 3aebfdc82b30..fa3eb361d4f8 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -159,6 +159,8 @@ static int nfs4_stat_to_errno(int);
(op_decode_hdr_maxsz)
#define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
#define decode_lookup_maxsz (op_decode_hdr_maxsz)
+#define encode_lookupp_maxsz (op_encode_hdr_maxsz)
+#define decode_lookupp_maxsz (op_decode_hdr_maxsz)
#define encode_share_access_maxsz \
(2)
#define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz)
@@ -618,6 +620,18 @@ static int nfs4_stat_to_errno(int);
decode_lookup_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
+#define NFS4_enc_lookupp_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_lookupp_maxsz + \
+ encode_getattr_maxsz + \
+ encode_getfh_maxsz)
+#define NFS4_dec_lookupp_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_lookupp_maxsz + \
+ decode_getattr_maxsz + \
+ decode_getfh_maxsz)
#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
@@ -1368,6 +1382,11 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
encode_string(xdr, name->len, name->name);
}
+static void encode_lookupp(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_LOOKUPP, decode_lookupp_maxsz, hdr);
+}
+
static void encode_share_access(struct xdr_stream *xdr, u32 share_access)
{
__be32 *p;
@@ -1651,7 +1670,8 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
}
static void
-encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
+encode_setacl(struct xdr_stream *xdr, const struct nfs_setaclargs *arg,
+ struct compound_hdr *hdr)
{
__be32 *p;
@@ -1735,7 +1755,7 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
static void encode_bind_conn_to_session(struct xdr_stream *xdr,
- struct nfs41_bind_conn_to_session_args *args,
+ const struct nfs41_bind_conn_to_session_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
@@ -1748,7 +1768,7 @@ static void encode_bind_conn_to_session(struct xdr_stream *xdr,
*p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
}
-static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
+static void encode_op_map(struct xdr_stream *xdr, const struct nfs4_op_map *op_map)
{
unsigned int i;
encode_uint32(xdr, NFS4_OP_MAP_NUM_WORDS);
@@ -1757,7 +1777,7 @@ static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
}
static void encode_exchange_id(struct xdr_stream *xdr,
- struct nfs41_exchange_id_args *args,
+ const struct nfs41_exchange_id_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
@@ -1809,7 +1829,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
}
static void encode_create_session(struct xdr_stream *xdr,
- struct nfs41_create_session_args *args,
+ const struct nfs41_create_session_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
@@ -1862,7 +1882,7 @@ static void encode_create_session(struct xdr_stream *xdr,
}
static void encode_destroy_session(struct xdr_stream *xdr,
- struct nfs4_session *session,
+ const struct nfs4_session *session,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_DESTROY_SESSION, decode_destroy_session_maxsz, hdr);
@@ -1878,7 +1898,7 @@ static void encode_destroy_clientid(struct xdr_stream *xdr,
}
static void encode_reclaim_complete(struct xdr_stream *xdr,
- struct nfs41_reclaim_complete_args *args,
+ const struct nfs41_reclaim_complete_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_RECLAIM_COMPLETE, decode_reclaim_complete_maxsz, hdr);
@@ -1974,7 +1994,7 @@ encode_layoutget(struct xdr_stream *xdr,
static int
encode_layoutcommit(struct xdr_stream *xdr,
struct inode *inode,
- struct nfs4_layoutcommit_args *args,
+ const struct nfs4_layoutcommit_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
@@ -2044,7 +2064,7 @@ encode_secinfo_no_name(struct xdr_stream *xdr,
}
static void encode_test_stateid(struct xdr_stream *xdr,
- struct nfs41_test_stateid_args *args,
+ const struct nfs41_test_stateid_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_TEST_STATEID, decode_test_stateid_maxsz, hdr);
@@ -2053,7 +2073,7 @@ static void encode_test_stateid(struct xdr_stream *xdr,
}
static void encode_free_stateid(struct xdr_stream *xdr,
- struct nfs41_free_stateid_args *args,
+ const struct nfs41_free_stateid_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
@@ -2086,8 +2106,9 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
* Encode an ACCESS request
*/
static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_accessargs *args)
+ const void *data)
{
+ const struct nfs4_accessargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2104,8 +2125,9 @@ static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode LOOKUP request
*/
static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_lookup_arg *args)
+ const void *data)
{
+ const struct nfs4_lookup_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2120,12 +2142,33 @@ static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * Encode LOOKUPP request
+ */
+static void nfs4_xdr_enc_lookupp(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs4_lookupp_arg *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_lookupp(xdr, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
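LOOKUPP is the NFSv4 operation that looks up the parent of the current filehandle, so unlike LOOKUP the compound carries no name: PUTFH, LOOKUPP, GETFH, GETATTR. The encode_lookupp() helper itself is not visible in this hunk; by analogy with its sibling encoders it is presumably just the op header, along the lines of:

    static void encode_lookupp(struct xdr_stream *xdr, struct compound_hdr *hdr)
    {
            /* LOOKUPP carries no arguments beyond the opcode itself */
            encode_op_hdr(xdr, OP_LOOKUPP, decode_lookupp_maxsz, hdr);
    }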
+/*
* Encode LOOKUP_ROOT request
*/
static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs4_lookup_root_arg *args)
+ const void *data)
{
+ const struct nfs4_lookup_root_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2142,8 +2185,9 @@ static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
* Encode REMOVE request
*/
static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs_removeargs *args)
+ const void *data)
{
+ const struct nfs_removeargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2159,8 +2203,9 @@ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode RENAME request
*/
static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs_renameargs *args)
+ const void *data)
{
+ const struct nfs_renameargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2178,8 +2223,9 @@ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode LINK request
*/
static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_link_arg *args)
+ const void *data)
{
+ const struct nfs4_link_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2199,8 +2245,9 @@ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode CREATE request
*/
static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_create_arg *args)
+ const void *data)
{
+ const struct nfs4_create_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2218,8 +2265,10 @@ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode SYMLINK request
*/
static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_create_arg *args)
+ const void *data)
{
+ const struct nfs4_create_arg *args = data;
+
nfs4_xdr_enc_create(req, xdr, args);
}
@@ -2227,8 +2276,9 @@ static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode GETATTR request
*/
static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_getattr_arg *args)
+ const void *data)
{
+ const struct nfs4_getattr_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2244,8 +2294,9 @@ static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a CLOSE request
*/
static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_closeargs *args)
+ const void *data)
{
+ const struct nfs_closeargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2265,8 +2316,9 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode an OPEN request
*/
static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_openargs *args)
+ const void *data)
{
+ const struct nfs_openargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2287,8 +2339,9 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_open_confirmargs *args)
+ const void *data)
{
+ const struct nfs_open_confirmargs *args = data;
struct compound_hdr hdr = {
.nops = 0,
};
@@ -2304,8 +2357,9 @@ static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_openargs *args)
+ const void *data)
{
+ const struct nfs_openargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2325,8 +2379,9 @@ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_closeargs *args)
+ const void *data)
{
+ const struct nfs_closeargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2344,8 +2399,9 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
* Encode a LOCK request
*/
static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_lock_args *args)
+ const void *data)
{
+ const struct nfs_lock_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2361,8 +2417,9 @@ static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a LOCKT request
*/
static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_lockt_args *args)
+ const void *data)
{
+ const struct nfs_lockt_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2378,8 +2435,9 @@ static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a LOCKU request
*/
static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_locku_args *args)
+ const void *data)
{
+ const struct nfs_locku_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2393,8 +2451,9 @@ static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_release_lockowner_args *args)
+ const void *data)
{
+ const struct nfs_release_lockowner_args *args = data;
struct compound_hdr hdr = {
.minorversion = 0,
};
@@ -2408,8 +2467,9 @@ static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
* Encode a READLINK request
*/
static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_readlink *args)
+ const void *data)
{
+ const struct nfs4_readlink *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2428,8 +2488,9 @@ static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a READDIR request
*/
static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_readdir_arg *args)
+ const void *data)
{
+ const struct nfs4_readdir_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2451,8 +2512,9 @@ static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a READ request
*/
static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_pgio_args *args)
+ const void *data)
{
+ const struct nfs_pgio_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2472,8 +2534,9 @@ static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
 * Encode a SETATTR request
*/
static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_setattrargs *args)
+ const void *data)
{
+ const struct nfs_setattrargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2490,8 +2553,9 @@ static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a GETACL request
*/
static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_getaclargs *args)
+ const void *data)
{
+ const struct nfs_getaclargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2513,8 +2577,9 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
* Encode a WRITE request
*/
static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_pgio_args *args)
+ const void *data)
{
+ const struct nfs_pgio_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2533,8 +2598,9 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
* a COMMIT request
*/
static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_commitargs *args)
+ const void *data)
{
+ const struct nfs_commitargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2550,8 +2616,9 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
* FSINFO request
*/
static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs4_fsinfo_arg *args)
+ const void *data)
{
+ const struct nfs4_fsinfo_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2567,8 +2634,9 @@ static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
* a PATHCONF request
*/
static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_pathconf_arg *args)
+ const void *data)
{
+ const struct nfs4_pathconf_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2585,8 +2653,9 @@ static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
* a STATFS request
*/
static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfs4_statfs_arg *args)
+ const void *data)
{
+ const struct nfs4_statfs_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2604,8 +2673,9 @@ static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_server_caps_arg *args)
+ const void *data)
{
+ const struct nfs4_server_caps_arg *args = data;
const u32 *bitmask = args->bitmask;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
@@ -2622,8 +2692,10 @@ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
* a RENEW request
*/
static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_client *clp)
+ const void *data)
+
{
+ const struct nfs_client *clp = data;
struct compound_hdr hdr = {
.nops = 0,
};
@@ -2638,8 +2710,9 @@ static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_setclientid *sc)
+ const void *data)
{
+ const struct nfs4_setclientid *sc = data;
struct compound_hdr hdr = {
.nops = 0,
};
@@ -2654,8 +2727,9 @@ static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_setclientid_res *arg)
+ const void *data)
{
+ const struct nfs4_setclientid_res *arg = data;
struct compound_hdr hdr = {
.nops = 0,
};
@@ -2670,8 +2744,9 @@ static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs4_delegreturnargs *args)
+ const void *data)
{
+ const struct nfs4_delegreturnargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2692,8 +2767,9 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_fs_locations_arg *args)
+ const void *data)
{
+ const struct nfs4_fs_locations_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2715,8 +2791,8 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
}
/* Set up reply kvec to capture returned fs_locations array. */
- xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
- 0, PAGE_SIZE);
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+ (struct page **)&args->page, 0, PAGE_SIZE);
encode_nops(&hdr);
}
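
The extra cast here is a knock-on effect of the constified argument: args now points at const storage, so &args->page has type struct page *const *, while xdr_inline_pages() wants a plain struct page ** for the reply buffer. The const is deliberately cast away for this one call. For reference, the signature being satisfied is roughly (from include/linux/sunrpc/xdr.h):

    void xdr_inline_pages(struct xdr_buf *buf, unsigned int offset,
                          struct page **pages, unsigned int base,
                          unsigned int len);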
@@ -2725,8 +2801,9 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_secinfo_arg *args)
+ const void *data)
{
+ const struct nfs4_secinfo_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2743,8 +2820,9 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_fsid_present_arg *args)
+ const void *data)
{
+ const struct nfs4_fsid_present_arg *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2764,8 +2842,9 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_bind_conn_to_session_args *args)
+ const void *data)
{
+ const struct nfs41_bind_conn_to_session_args *args = data;
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
@@ -2780,8 +2859,9 @@ static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_exchange_id_args *args)
+ const void *data)
{
+ const struct nfs41_exchange_id_args *args = data;
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
@@ -2796,8 +2876,9 @@ static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_create_session_args *args)
+ const void *data)
{
+ const struct nfs41_create_session_args *args = data;
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
@@ -2812,8 +2893,9 @@ static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_session *session)
+ const void *data)
{
+ const struct nfs4_session *session = data;
struct compound_hdr hdr = {
.minorversion = session->clp->cl_mvops->minor_version,
};
@@ -2828,8 +2910,9 @@ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_client *clp)
+ const void *data)
{
+ const struct nfs_client *clp = data;
struct compound_hdr hdr = {
.minorversion = clp->cl_mvops->minor_version,
};
@@ -2843,8 +2926,9 @@ static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
* a SEQUENCE request
*/
static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs4_sequence_args *args)
+ const void *data)
{
+ const struct nfs4_sequence_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(args),
};
@@ -2859,8 +2943,9 @@ static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_get_lease_time_args *args)
+ const void *data)
{
+ const struct nfs4_get_lease_time_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
};
@@ -2878,8 +2963,9 @@ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_reclaim_complete_args *args)
+ const void *data)
{
+ const struct nfs41_reclaim_complete_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args)
};
@@ -2895,8 +2981,9 @@ static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_getdeviceinfo_args *args)
+ const void *data)
{
+ const struct nfs4_getdeviceinfo_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2919,8 +3006,9 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_layoutget_args *args)
+ const void *data)
{
+ const struct nfs4_layoutget_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2941,8 +3029,9 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_layoutcommit_args *args)
+ const void *priv)
{
+ const struct nfs4_layoutcommit_args *args = priv;
struct nfs4_layoutcommit_data *data =
container_of(args, struct nfs4_layoutcommit_data, args);
struct compound_hdr hdr = {
@@ -2962,8 +3051,9 @@ static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_layoutreturn_args *args)
+ const void *data)
{
+ const struct nfs4_layoutreturn_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2978,10 +3068,11 @@ static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
/*
* Encode SECINFO_NO_NAME request
*/
-static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
+static void nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_secinfo_no_name_args *args)
+ const void *data)
{
+ const struct nfs41_secinfo_no_name_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -2991,7 +3082,6 @@ static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
encode_putrootfh(xdr, &hdr);
encode_secinfo_no_name(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
@@ -2999,8 +3089,9 @@ static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_test_stateid_args *args)
+ const void *data)
{
+ const struct nfs41_test_stateid_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -3016,8 +3107,9 @@ static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
*/
static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs41_free_stateid_args *args)
+ const void *data)
{
+ const struct nfs41_free_stateid_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -5005,6 +5097,11 @@ static int decode_lookup(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_LOOKUP);
}
+static int decode_lookupp(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_LOOKUPP);
+}
+
/* This is too sick! */
static int decode_space_limit(struct xdr_stream *xdr,
unsigned long *pagemod_limit)
@@ -6101,8 +6198,9 @@ int decode_layoutreturn(struct xdr_stream *xdr,
*/
static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs_closeres *res)
+ void *data)
{
+ struct nfs_closeres *res = data;
struct compound_hdr hdr;
int status;
@@ -6130,8 +6228,9 @@ out:
* Decode ACCESS response
*/
static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_accessres *res)
+ void *data)
{
+ struct nfs4_accessres *res = data;
struct compound_hdr hdr;
int status;
@@ -6156,8 +6255,9 @@ out:
* Decode LOOKUP response
*/
static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_lookup_res *res)
+ void *data)
{
+ struct nfs4_lookup_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6182,12 +6282,43 @@ out:
}
/*
+ * Decode LOOKUPP response
+ */
+static int nfs4_xdr_dec_lookupp(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs4_lookupp_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_lookupp(xdr);
+ if (status)
+ goto out;
+ status = decode_getfh(xdr, res->fh);
+ if (status)
+ goto out;
+ status = decode_getfattr_label(xdr, res->fattr, res->label, res->server);
+out:
+ return status;
+}
+
+/*
* Decode LOOKUP_ROOT response
*/
static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_lookup_res *res)
+ void *data)
{
+ struct nfs4_lookup_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6212,8 +6343,9 @@ out:
* Decode REMOVE response
*/
static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_removeres *res)
+ void *data)
{
+ struct nfs_removeres *res = data;
struct compound_hdr hdr;
int status;
@@ -6235,8 +6367,9 @@ out:
* Decode RENAME response
*/
static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_renameres *res)
+ void *data)
{
+ struct nfs_renameres *res = data;
struct compound_hdr hdr;
int status;
@@ -6264,8 +6397,9 @@ out:
* Decode LINK response
*/
static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_link_res *res)
+ void *data)
{
+ struct nfs4_link_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6303,8 +6437,9 @@ out:
* Decode CREATE response
*/
static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_create_res *res)
+ void *data)
{
+ struct nfs4_create_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6332,7 +6467,7 @@ out:
* Decode SYMLINK response
*/
static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_create_res *res)
+ void *res)
{
return nfs4_xdr_dec_create(rqstp, xdr, res);
}
@@ -6341,8 +6476,9 @@ static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* Decode GETATTR response
*/
static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_getattr_res *res)
+ void *data)
{
+ struct nfs4_getattr_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6364,8 +6500,9 @@ out:
 * Encode a SETACL request
*/
static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_setaclargs *args)
+ const void *data)
{
+ const struct nfs_setaclargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
@@ -6382,8 +6519,9 @@ static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static int
nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_setaclres *res)
+ void *data)
{
+ struct nfs_setaclres *res = data;
struct compound_hdr hdr;
int status;
@@ -6406,8 +6544,9 @@ out:
*/
static int
nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_getaclres *res)
+ void *data)
{
+ struct nfs_getaclres *res = data;
struct compound_hdr hdr;
int status;
@@ -6434,8 +6573,9 @@ out:
* Decode CLOSE response
*/
static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_closeres *res)
+ void *data)
{
+ struct nfs_closeres *res = data;
struct compound_hdr hdr;
int status;
@@ -6468,8 +6608,9 @@ out:
* Decode OPEN response
*/
static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_openres *res)
+ void *data)
{
+ struct nfs_openres *res = data;
struct compound_hdr hdr;
int status;
@@ -6500,8 +6641,9 @@ out:
*/
static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs_open_confirmres *res)
+ void *data)
{
+ struct nfs_open_confirmres *res = data;
struct compound_hdr hdr;
int status;
@@ -6521,8 +6663,9 @@ out:
*/
static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs_openres *res)
+ void *data)
{
+ struct nfs_openres *res = data;
struct compound_hdr hdr;
int status;
@@ -6550,8 +6693,9 @@ out:
*/
static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs_setattrres *res)
+ void *data)
{
+ struct nfs_setattrres *res = data;
struct compound_hdr hdr;
int status;
@@ -6576,8 +6720,9 @@ out:
* Decode LOCK response
*/
static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_lock_res *res)
+ void *data)
{
+ struct nfs_lock_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6599,8 +6744,9 @@ out:
* Decode LOCKT response
*/
static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_lockt_res *res)
+ void *data)
{
+ struct nfs_lockt_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6622,8 +6768,9 @@ out:
* Decode LOCKU response
*/
static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_locku_res *res)
+ void *data)
{
+ struct nfs_locku_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6658,8 +6805,9 @@ static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
*/
static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_readlink_res *res)
+ void *data)
{
+ struct nfs4_readlink_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6681,8 +6829,9 @@ out:
* Decode READDIR response
*/
static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs4_readdir_res *res)
+ void *data)
{
+ struct nfs4_readdir_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6704,8 +6853,9 @@ out:
* Decode Read response
*/
static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_pgio_res *res)
+ void *data)
{
+ struct nfs_pgio_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6730,8 +6880,9 @@ out:
* Decode WRITE response
*/
static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_pgio_res *res)
+ void *data)
{
+ struct nfs_pgio_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6760,8 +6911,9 @@ out:
* Decode COMMIT response
*/
static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_commitres *res)
+ void *data)
{
+ struct nfs_commitres *res = data;
struct compound_hdr hdr;
int status;
@@ -6784,8 +6936,9 @@ out:
* Decode FSINFO response
*/
static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs4_fsinfo_res *res)
+ void *data)
{
+ struct nfs4_fsinfo_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6803,8 +6956,9 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
* Decode PATHCONF response
*/
static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs4_pathconf_res *res)
+ void *data)
{
+ struct nfs4_pathconf_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6822,8 +6976,9 @@ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
* Decode STATFS response
*/
static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs4_statfs_res *res)
+ void *data)
{
+ struct nfs4_statfs_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6842,8 +6997,9 @@ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_server_caps_res *res)
+ void *data)
{
+ struct nfs4_server_caps_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6881,8 +7037,9 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_setclientid_res *res)
+ void *data)
{
+ struct nfs4_setclientid_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6896,7 +7053,8 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
* Decode SETCLIENTID_CONFIRM response
*/
static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
- struct xdr_stream *xdr)
+ struct xdr_stream *xdr,
+ void *data)
{
struct compound_hdr hdr;
int status;
@@ -6912,8 +7070,9 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
*/
static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_delegreturnres *res)
+ void *data)
{
+ struct nfs4_delegreturnres *res = data;
struct compound_hdr hdr;
int status;
@@ -6947,8 +7106,9 @@ out:
*/
static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs4_fs_locations_res *res)
+ void *data)
{
+ struct nfs4_fs_locations_res *res = data;
struct compound_hdr hdr;
int status;
@@ -6990,8 +7150,9 @@ out:
*/
static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_secinfo_res *res)
+ void *data)
{
+ struct nfs4_secinfo_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7014,8 +7175,9 @@ out:
*/
static int nfs4_xdr_dec_fsid_present(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_fsid_present_res *res)
+ void *data)
{
+ struct nfs4_fsid_present_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7075,7 +7237,7 @@ static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
*/
static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs41_create_session_res *res)
+ void *res)
{
struct compound_hdr hdr;
int status;
@@ -7123,7 +7285,7 @@ static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
*/
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_sequence_res *res)
+ void *res)
{
struct compound_hdr hdr;
int status;
@@ -7139,8 +7301,9 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
*/
static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_get_lease_time_res *res)
+ void *data)
{
+ struct nfs4_get_lease_time_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7159,8 +7322,9 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
*/
static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs41_reclaim_complete_res *res)
+ void *data)
{
+ struct nfs41_reclaim_complete_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7177,8 +7341,9 @@ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
*/
static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_getdeviceinfo_res *res)
+ void *data)
{
+ struct nfs4_getdeviceinfo_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7198,8 +7363,9 @@ out:
*/
static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_layoutget_res *res)
+ void *data)
{
+ struct nfs4_layoutget_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7222,8 +7388,9 @@ out:
*/
static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_layoutreturn_res *res)
+ void *data)
{
+ struct nfs4_layoutreturn_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7246,8 +7413,9 @@ out:
*/
static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_layoutcommit_res *res)
+ void *data)
{
+ struct nfs4_layoutcommit_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7273,8 +7441,9 @@ out:
*/
static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs4_secinfo_res *res)
+ void *data)
{
+ struct nfs4_secinfo_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7297,8 +7466,9 @@ out:
*/
static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs41_test_stateid_res *res)
+ void *data)
{
+ struct nfs41_test_stateid_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7318,8 +7488,9 @@ out:
*/
static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfs41_free_stateid_res *res)
+ void *data)
{
+ struct nfs41_free_stateid_res *res = data;
struct compound_hdr hdr;
int status;
@@ -7350,7 +7521,7 @@ out:
* on a directory already in our cache.
*/
int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
- int plus)
+ bool plus)
{
unsigned int savep;
uint32_t bitmap[3] = {0};
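
The readdir "plus" flag (whether to request READDIRPLUS-style attribute fill-in) was previously an int; it becomes a bool throughout the client, here and in nfs_proc_readdir() further down. The corresponding nfs_rpc_ops hook presumably changes in step, along these lines (a sketch, not the verbatim header):

    struct nfs_rpc_ops {
            /* ... */
            int (*readdir)(struct dentry *dentry, struct rpc_cred *cred,
                           u64 cookie, struct page **pages,
                           unsigned int count, bool plus);
            /* ... */
    };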
@@ -7484,8 +7655,8 @@ nfs4_stat_to_errno(int stat)
#define PROC(proc, argtype, restype) \
[NFSPROC4_CLNT_##proc] = { \
.p_proc = NFSPROC4_COMPOUND, \
- .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \
- .p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \
+ .p_encode = nfs4_xdr_##argtype, \
+ .p_decode = nfs4_xdr_##restype, \
.p_arglen = NFS4_##argtype##_sz, \
.p_replen = NFS4_##restype##_sz, \
.p_statidx = NFSPROC4_CLNT_##proc, \
@@ -7497,7 +7668,7 @@ nfs4_stat_to_errno(int stat)
.p_name = #proc, \
}
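
With every encoder taking const void * and every decoder taking void *, the table entries now match the sunrpc typedefs directly and the (kxdreproc_t)/(kxdrdproc_t) casts can go. The typedefs being matched are, per include/linux/sunrpc/xdr.h:

    typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
                                const void *obj);
    typedef int  (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
                                void *obj);

Dropping the casts is the payoff of the whole conversion: a mismatched prototype now becomes a build error instead of silent type punning through a cast.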
-struct rpc_procinfo nfs4_procedures[] = {
+const struct rpc_procinfo nfs4_procedures[] = {
PROC(READ, enc_read, dec_read),
PROC(WRITE, enc_write, dec_write),
PROC(COMMIT, enc_commit, dec_commit),
@@ -7517,6 +7688,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(ACCESS, enc_access, dec_access),
PROC(GETATTR, enc_getattr, dec_getattr),
PROC(LOOKUP, enc_lookup, dec_lookup),
+ PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
PROC(REMOVE, enc_remove, dec_remove),
PROC(RENAME, enc_rename, dec_rename),
@@ -7564,10 +7736,12 @@ struct rpc_procinfo nfs4_procedures[] = {
#endif /* CONFIG_NFS_V4_2 */
};
+static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
const struct rpc_version nfs_version4 = {
.number = 4,
.nrprocs = ARRAY_SIZE(nfs4_procedures),
- .procs = nfs4_procedures
+ .procs = nfs4_procedures,
+ .counts = nfs_version4_counts,
};
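
Because nfs4_procedures[] is now const, the per-procedure call statistics that used to live in the writable rpc_procinfo move into a separate counts array, one counter per table entry, sized with ARRAY_SIZE so the two cannot drift apart. A sketch of how the core can bump it, with a hypothetical helper name (the real increment happens inside the sunrpc core):

    static inline void rpc_count_proc(const struct rpc_version *vers, u32 proc)
    {
            if (proc < vers->nrprocs)
                    vers->counts[proc]++;
    }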
/*
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ad92b401326c..de9066a92c0d 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -50,8 +50,8 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
hdr->cred = hdr->req->wb_context->cred;
hdr->io_start = req_offset(hdr->req);
hdr->good_bytes = mirror->pg_count;
+ hdr->io_completion = desc->pg_io_completion;
hdr->dreq = desc->pg_dreq;
- hdr->layout_private = desc->pg_layout_private;
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
@@ -155,9 +155,12 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock)
if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
return 0;
- if (!nonblock)
+ if (!nonblock) {
+ set_bit(PG_CONTENDED1, &head->wb_flags);
+ smp_mb__after_atomic();
return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
+ }
return -EAGAIN;
}
@@ -175,6 +178,10 @@ nfs_page_group_lock_wait(struct nfs_page *req)
WARN_ON_ONCE(head != head->wb_head);
+ if (!test_bit(PG_HEADLOCK, &head->wb_flags))
+ return;
+ set_bit(PG_CONTENDED1, &head->wb_flags);
+ smp_mb__after_atomic();
wait_on_bit(&head->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
}
@@ -193,6 +200,8 @@ nfs_page_group_unlock(struct nfs_page *req)
smp_mb__before_atomic();
clear_bit(PG_HEADLOCK, &head->wb_flags);
smp_mb__after_atomic();
+ if (!test_bit(PG_CONTENDED1, &head->wb_flags))
+ return;
wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}
@@ -383,6 +392,8 @@ void nfs_unlock_request(struct nfs_page *req)
smp_mb__before_atomic();
clear_bit(PG_BUSY, &req->wb_flags);
smp_mb__after_atomic();
+ if (!test_bit(PG_CONTENDED2, &req->wb_flags))
+ return;
wake_up_bit(&req->wb_flags, PG_BUSY);
}
@@ -465,6 +476,10 @@ void nfs_release_request(struct nfs_page *req)
int
nfs_wait_on_request(struct nfs_page *req)
{
+ if (!test_bit(PG_BUSY, &req->wb_flags))
+ return 0;
+ set_bit(PG_CONTENDED2, &req->wb_flags);
+ smp_mb__after_atomic();
return wait_on_bit_io(&req->wb_flags, PG_BUSY,
TASK_UNINTERRUPTIBLE);
}
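
These pagelist.c hunks all implement one optimization: wake_up_bit() costs a waitqueue hash lookup even when nobody is waiting, so a PG_CONTENDED1/PG_CONTENDED2 bit is set (with a full barrier) before a task actually sleeps, and the unlock paths skip the wakeup entirely unless that bit is set. The general shape of the pattern, with hypothetical bit names:

    /* waiter side: advertise contention before sleeping */
    set_bit(PG_CONTENDED, &flags);
    smp_mb__after_atomic();        /* pairs with the test in the unlock path */
    wait_on_bit_lock(&flags, PG_LOCK, TASK_UNINTERRUPTIBLE);

    /* unlock side: skip the waitqueue hash walk when uncontended */
    smp_mb__before_atomic();
    clear_bit(PG_LOCK, &flags);
    smp_mb__after_atomic();
    if (test_bit(PG_CONTENDED, &flags))
            wake_up_bit(&flags, PG_LOCK);

Note that the contended bit is never cleared in these hunks; it is only a hint, so the worst stale state can cause is one unnecessary wakeup check.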
@@ -710,8 +725,8 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
desc->pg_lseg = NULL;
+ desc->pg_io_completion = NULL;
desc->pg_dreq = NULL;
- desc->pg_layout_private = NULL;
desc->pg_bsize = bsize;
desc->pg_mirror_count = 1;
@@ -779,6 +794,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
gfp_t gfp_flags = GFP_KERNEL;
pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
+ pg_array->npages = pagecount;
if (pagecount <= ARRAY_SIZE(pg_array->page_array))
pg_array->pagevec = pg_array->page_array;
@@ -1233,6 +1249,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
{
LIST_HEAD(failed);
+ desc->pg_io_completion = hdr->io_completion;
desc->pg_dreq = hdr->dreq;
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 9872cf676a50..7962e49097c3 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -485,7 +485,7 @@ nfs_proc_rmdir(struct inode *dir, const struct qstr *name)
*/
static int
nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page **pages, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
struct nfs_readdirargs arg = {
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index c5334c0e23a1..d828ef88e7db 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -879,7 +879,7 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
if (nfss->options & NFS_OPTION_FSCACHE) {
seq_printf(m, "\n\tfsc:\t");
for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
- seq_printf(m, "%Lu ", totals.bytes[i]);
+ seq_printf(m, "%Lu ", totals.fscache[i]);
}
#endif
seq_printf(m, "\n");
@@ -2339,6 +2339,7 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
*/
sb->s_flags |= MS_POSIXACL;
sb->s_time_gran = 1;
+ sb->s_export_op = &nfs_export_ops;
}
nfs_initialise_sb(sb);
@@ -2360,6 +2361,7 @@ static void nfs_clone_super(struct super_block *sb,
sb->s_xattr = old_sb->s_xattr;
sb->s_op = old_sb->s_op;
sb->s_time_gran = 1;
+ sb->s_export_op = old_sb->s_export_op;
if (server->nfs_client->rpc_ops->version != 2) {
/* The VFS shouldn't apply the umask to mode bits. We will do
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 191aa577dd1f..e3949d93085c 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -288,6 +288,19 @@ static void nfs_async_rename_release(void *calldata)
if (d_really_is_positive(data->old_dentry))
nfs_mark_for_revalidate(d_inode(data->old_dentry));
+ /* The result of the rename is unknown. Play it safe by
+ * forcing a new lookup */
+ if (data->cancelled) {
+ spin_lock(&data->old_dir->i_lock);
+ nfs_force_lookup_revalidate(data->old_dir);
+ spin_unlock(&data->old_dir->i_lock);
+ if (data->new_dir != data->old_dir) {
+ spin_lock(&data->new_dir->i_lock);
+ nfs_force_lookup_revalidate(data->new_dir);
+ spin_unlock(&data->new_dir->i_lock);
+ }
+ }
+
dput(data->old_dentry);
dput(data->new_dentry);
iput(data->old_dir);
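
If the rename RPC was cancelled, the client cannot know whether the server completed the operation, so cached dentries under both directories are suspect; forcing lookup revalidation makes the next lookup in either directory go back to the server instead of trusting the dcache. nfs_force_lookup_revalidate() lives in fs/nfs/inode.c and, if memory serves, simply invalidates the directory's cached change attribute:

    void nfs_force_lookup_revalidate(struct inode *dir)
    {
            NFS_I(dir)->cache_change_attribute = jiffies;
    }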
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index db7ba542559e..b1af5dee5e0a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -40,6 +40,12 @@
#define MIN_POOL_WRITE (32)
#define MIN_POOL_COMMIT (4)
+struct nfs_io_completion {
+ void (*complete)(void *data);
+ void *data;
+ struct kref refcount;
+};
+
/*
* Local function declarations
*/
@@ -108,6 +114,39 @@ static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
mempool_free(hdr, nfs_wdata_mempool);
}
+static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
+{
+ return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
+}
+
+static void nfs_io_completion_init(struct nfs_io_completion *ioc,
+ void (*complete)(void *), void *data)
+{
+ ioc->complete = complete;
+ ioc->data = data;
+ kref_init(&ioc->refcount);
+}
+
+static void nfs_io_completion_release(struct kref *kref)
+{
+ struct nfs_io_completion *ioc = container_of(kref,
+ struct nfs_io_completion, refcount);
+ ioc->complete(ioc->data);
+ kfree(ioc);
+}
+
+static void nfs_io_completion_get(struct nfs_io_completion *ioc)
+{
+ if (ioc != NULL)
+ kref_get(&ioc->refcount);
+}
+
+static void nfs_io_completion_put(struct nfs_io_completion *ioc)
+{
+ if (ioc != NULL)
+ kref_put(&ioc->refcount, nfs_io_completion_release);
+}
+
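Taken together, these helpers give nfs_writepages() a single refcounted completion object: the initializer takes the first reference, each pgio header generated during the flush takes one more (via nfs_async_write_init below), and each completed header drops one; when the last reference goes, ->complete() fires, which here is nfs_io_completion_commit() kicking off a COMMIT for the whole flush. The lifecycle, stitched together from the hunks in this file:

    ioc = nfs_io_completion_alloc(GFP_NOFS);
    nfs_io_completion_init(ioc, nfs_io_completion_commit, inode); /* ref = 1 */
    pgio.pg_io_completion = ioc;
    /* per header: nfs_io_completion_get() in .init_hdr,
     * nfs_io_completion_put() at the end of nfs_write_completion() */
    nfs_io_completion_put(ioc);   /* drop writepages' own reference */
    /* final put -> nfs_io_completion_release() -> nfs_commit_inode(inode, 0) */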
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
ctx->error = error;
@@ -681,18 +720,29 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
return ret;
}
+static void nfs_io_completion_commit(void *inode)
+{
+ nfs_commit_inode(inode, 0);
+}
+
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct nfs_pageio_descriptor pgio;
+ struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS);
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+ if (ioc)
+ nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
+
nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
&nfs_async_write_completion_ops);
+ pgio.pg_io_completion = ioc;
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
+ nfs_io_completion_put(ioc);
if (err < 0)
goto out_err;
@@ -940,6 +990,11 @@ int nfs_write_need_commit(struct nfs_pgio_header *hdr)
return hdr->verf.committed != NFS_FILE_SYNC;
}
+static void nfs_async_write_init(struct nfs_pgio_header *hdr)
+{
+ nfs_io_completion_get(hdr->io_completion);
+}
+
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
struct nfs_commit_info cinfo;
@@ -973,6 +1028,7 @@ next:
nfs_release_request(req);
}
out:
+ nfs_io_completion_put(hdr->io_completion);
hdr->release(hdr);
}
@@ -1378,6 +1434,7 @@ static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
}
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+ .init_hdr = nfs_async_write_init,
.error_cleanup = nfs_async_write_error,
.completion = nfs_write_completion,
.reschedule_io = nfs_async_write_reschedule_io,
@@ -1884,7 +1941,7 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
/* Don't commit yet if this is a non-blocking flush and there
* are a lot of outstanding writes for this mapping.
*/
- if (nfsi->commit_info.ncommit <= (nfsi->nrequests >> 1))
+ if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
goto out_mark_dirty;
/* don't wait for the COMMIT response */
diff --git a/fs/nfsd/current_stateid.h b/fs/nfsd/current_stateid.h
index 4123551208d8..34075cee573a 100644
--- a/fs/nfsd/current_stateid.h
+++ b/fs/nfsd/current_stateid.h
@@ -8,21 +8,33 @@ extern void clear_current_stateid(struct nfsd4_compound_state *cstate);
/*
* functions to set current state id
*/
-extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
-extern void nfsd4_set_openstateid(struct nfsd4_compound_state *, struct nfsd4_open *);
-extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *, struct nfsd4_lock *);
-extern void nfsd4_set_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
+extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_set_openstateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_set_closestateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
/*
* functions to consume current state id
*/
-extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
-extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *, struct nfsd4_delegreturn *);
-extern void nfsd4_get_freestateid(struct nfsd4_compound_state *, struct nfsd4_free_stateid *);
-extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *, struct nfsd4_setattr *);
-extern void nfsd4_get_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
-extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *, struct nfsd4_locku *);
-extern void nfsd4_get_readstateid(struct nfsd4_compound_state *, struct nfsd4_read *);
-extern void nfsd4_get_writestateid(struct nfsd4_compound_state *, struct nfsd4_write *);
+extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_freestateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_closestateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_readstateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+extern void nfsd4_get_writestateid(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
#endif /* _NFSD4_CURRENT_STATE_H */
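
The server side gets the same treatment as the client XDR routines: every stateid helper now takes a union nfsd4_op_u instead of its op-specific struct, so the nfsd4 operation table can store all of them through a single function-pointer type without casts. A sketch of the shape this implies (member names and body are illustrative, not verbatim):

    union nfsd4_op_u {
            struct nfsd4_open open;
            struct nfsd4_close close;
            /* one member per NFSv4 operation */
    };

    void nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
                               union nfsd4_op_u *u)
    {
            struct nfsd4_open *open = &u->open;
            /* operate on the concrete op exactly as before */
    }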
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 838f90f3f890..6276ec8608b0 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -19,7 +19,7 @@
* NULL call.
*/
static __be32
-nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nfsacld_proc_null(struct svc_rqst *rqstp)
{
return nfs_ok;
}
@@ -27,9 +27,10 @@ nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
/*
* Get the Access and/or Default ACL of a file.
*/
-static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
- struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
+static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
{
+ struct nfsd3_getaclargs *argp = rqstp->rq_argp;
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct posix_acl *acl;
struct inode *inode;
svc_fh *fh;
@@ -87,10 +88,10 @@ fail:
/*
* Set the Access and/or Default ACL of a file.
*/
-static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
- struct nfsd3_setaclargs *argp,
- struct nfsd_attrstat *resp)
+static __be32 nfsacld_proc_setacl(struct svc_rqst *rqstp)
{
+ struct nfsd3_setaclargs *argp = rqstp->rq_argp;
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
struct inode *inode;
svc_fh *fh;
__be32 nfserr = 0;
@@ -141,9 +142,10 @@ out_errno:
/*
* Check file attributes
*/
-static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
- struct nfsd_fhandle *argp, struct nfsd_attrstat *resp)
+static __be32 nfsacld_proc_getattr(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
@@ -158,9 +160,10 @@ static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
/*
* Check file access
*/
-static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
- struct nfsd3_accessres *resp)
+static __be32 nfsacld_proc_access(struct svc_rqst *rqstp)
{
+ struct nfsd3_accessargs *argp = rqstp->rq_argp;
+ struct nfsd3_accessres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: ACCESS(2acl) %s 0x%x\n",
@@ -179,9 +182,10 @@ static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessarg
/*
* XDR decode functions
*/
-static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_getaclargs *argp)
+static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_getaclargs *argp = rqstp->rq_argp;
+
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
return 0;
@@ -191,9 +195,9 @@ static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
}
-static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_setaclargs *argp)
+static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_setaclargs *argp = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
unsigned int base;
int n;
@@ -217,18 +221,20 @@ static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
return (n > 0);
}
-static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_fhandle *argp)
+static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
return 0;
return xdr_argsize_check(rqstp, p);
}
-static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_accessargs *argp)
+static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_accessargs *argp = rqstp->rq_argp;
+
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
return 0;
@@ -245,15 +251,15 @@ static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
* There must be an encoding function for void results so svc_process
* will work properly.
*/
-static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
/* GETACL */
-static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_getaclres *resp)
+static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
struct inode *inode;
struct kvec *head = rqstp->rq_res.head;
@@ -296,17 +302,19 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
return (n > 0);
}
-static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_attrstat *resp)
+static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
+
p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
/* ACCESS */
-static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_accessres *resp)
+static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_accessres *resp = rqstp->rq_resp;
+
p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->access);
return xdr_ressize_check(rqstp, p);
@@ -315,27 +323,27 @@ static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
/*
* XDR release functions
*/
-static int nfsaclsvc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_getaclres *resp)
+static void nfsaclsvc_release_getacl(struct svc_rqst *rqstp)
{
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
+
fh_put(&resp->fh);
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
- return 1;
}
-static int nfsaclsvc_release_attrstat(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_attrstat *resp)
+static void nfsaclsvc_release_attrstat(struct svc_rqst *rqstp)
{
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
+
fh_put(&resp->fh);
- return 1;
}
-static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_accessres *resp)
+static void nfsaclsvc_release_access(struct svc_rqst *rqstp)
{
- fh_put(&resp->fh);
- return 1;
+ struct nfsd3_accessres *resp = rqstp->rq_resp;
+
+ fh_put(&resp->fh);
}
#define nfsaclsvc_decode_voidargs NULL
@@ -345,24 +353,24 @@ static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
#define nfsd3_voidres nfsd3_voidargs
struct nfsd3_voidargs { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsacld_proc_##name, \
- (kxdrproc_t) nfsaclsvc_decode_##argt##args, \
- (kxdrproc_t) nfsaclsvc_encode_##rest##res, \
- (kxdrproc_t) nfsaclsvc_release_##relt, \
- sizeof(struct nfsd3_##argt##args), \
- sizeof(struct nfsd3_##rest##res), \
- 0, \
- cache, \
- respsize, \
- }
+#define PROC(name, argt, rest, relt, cache, respsize) \
+{ \
+ .pc_func = nfsacld_proc_##name, \
+ .pc_decode = nfsaclsvc_decode_##argt##args, \
+ .pc_encode = nfsaclsvc_encode_##rest##res, \
+ .pc_release = nfsaclsvc_release_##relt, \
+ .pc_argsize = sizeof(struct nfsd3_##argt##args), \
+ .pc_ressize = sizeof(struct nfsd3_##rest##res), \
+ .pc_cachetype = cache, \
+ .pc_xdrressize = respsize, \
+}
#define ST 1 /* status*/
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */
-static struct svc_procedure nfsd_acl_procedures2[] = {
+static const struct svc_procedure nfsd_acl_procedures2[] = {
PROC(null, void, void, void, RC_NOCACHE, ST),
PROC(getacl, getacl, getacl, getacl, RC_NOCACHE, ST+1+2*(1+ACL)),
PROC(setacl, setacl, attrstat, attrstat, RC_NOCACHE, ST+AT),
@@ -370,10 +378,12 @@ static struct svc_procedure nfsd_acl_procedures2[] = {
PROC(access, access, access, access, RC_NOCACHE, ST+AT+1),
};
-struct svc_version nfsd_acl_version2 = {
- .vs_vers = 2,
- .vs_nproc = 5,
- .vs_proc = nfsd_acl_procedures2,
- .vs_dispatch = nfsd_dispatch,
- .vs_xdrsize = NFS3_SVC_XDRSIZE,
+static unsigned int nfsd_acl_count2[ARRAY_SIZE(nfsd_acl_procedures2)];
+const struct svc_version nfsd_acl_version2 = {
+ .vs_vers = 2,
+ .vs_nproc = 5,
+ .vs_proc = nfsd_acl_procedures2,
+ .vs_count = nfsd_acl_count2,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_xdrsize = NFS3_SVC_XDRSIZE,
};
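
As on the client, constifying the procedure table forces the per-procedure statistics out into a separate array: vs_count points at one counter per entry of vs_proc, again sized with ARRAY_SIZE. The dispatch path can then account calls without ever writing to the const table, e.g. (sketch only; versp stands for the svc_version matched during dispatch):

    versp->vs_count[rqstp->rq_proc]++;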
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index dcb5f79076c0..01976529f042 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -18,7 +18,7 @@
* NULL call.
*/
static __be32
-nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nfsd3_proc_null(struct svc_rqst *rqstp)
{
return nfs_ok;
}
@@ -26,9 +26,10 @@ nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
/*
* Get the Access and/or Default ACL of a file.
*/
-static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp,
- struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
+static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
{
+ struct nfsd3_getaclargs *argp = rqstp->rq_argp;
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct posix_acl *acl;
struct inode *inode;
svc_fh *fh;
@@ -80,10 +81,10 @@ fail:
/*
* Set the Access and/or Default ACL of a file.
*/
-static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
- struct nfsd3_setaclargs *argp,
- struct nfsd3_attrstat *resp)
+static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
{
+ struct nfsd3_setaclargs *argp = rqstp->rq_argp;
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
struct inode *inode;
svc_fh *fh;
__be32 nfserr = 0;
@@ -123,9 +124,10 @@ out:
/*
* XDR decode functions
*/
-static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_getaclargs *args)
+static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_getaclargs *args = rqstp->rq_argp;
+
p = nfs3svc_decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -135,9 +137,9 @@ static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
}
-static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_setaclargs *args)
+static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_setaclargs *args = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
unsigned int base;
int n;
@@ -166,9 +168,9 @@ static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
*/
/* GETACL */
-static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_getaclres *resp)
+static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
@@ -211,9 +213,10 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
}
/* SETACL */
-static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_attrstat *resp)
+static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
+
p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
return xdr_ressize_check(rqstp, p);
@@ -222,13 +225,13 @@ static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
/*
* XDR release functions
*/
-static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_getaclres *resp)
+static void nfs3svc_release_getacl(struct svc_rqst *rqstp)
{
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
+
fh_put(&resp->fh);
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
- return 1;
}
#define nfs3svc_decode_voidargs NULL
@@ -237,34 +240,36 @@ static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
#define nfsd3_voidres nfsd3_voidargs
struct nfsd3_voidargs { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd3_proc_##name, \
- (kxdrproc_t) nfs3svc_decode_##argt##args, \
- (kxdrproc_t) nfs3svc_encode_##rest##res, \
- (kxdrproc_t) nfs3svc_release_##relt, \
- sizeof(struct nfsd3_##argt##args), \
- sizeof(struct nfsd3_##rest##res), \
- 0, \
- cache, \
- respsize, \
- }
+#define PROC(name, argt, rest, relt, cache, respsize) \
+{ \
+ .pc_func = nfsd3_proc_##name, \
+ .pc_decode = nfs3svc_decode_##argt##args, \
+ .pc_encode = nfs3svc_encode_##rest##res, \
+ .pc_release = nfs3svc_release_##relt, \
+ .pc_argsize = sizeof(struct nfsd3_##argt##args), \
+ .pc_ressize = sizeof(struct nfsd3_##rest##res), \
+ .pc_cachetype = cache, \
+ .pc_xdrressize = respsize, \
+}
#define ST 1 /* status */
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */
-static struct svc_procedure nfsd_acl_procedures3[] = {
+static const struct svc_procedure nfsd_acl_procedures3[] = {
PROC(null, void, void, void, RC_NOCACHE, ST),
PROC(getacl, getacl, getacl, getacl, RC_NOCACHE, ST+1+2*(1+ACL)),
PROC(setacl, setacl, setacl, fhandle, RC_NOCACHE, ST+pAT),
};
-struct svc_version nfsd_acl_version3 = {
- .vs_vers = 3,
- .vs_nproc = 3,
- .vs_proc = nfsd_acl_procedures3,
- .vs_dispatch = nfsd_dispatch,
- .vs_xdrsize = NFS3_SVC_XDRSIZE,
+static unsigned int nfsd_acl_count3[ARRAY_SIZE(nfsd_acl_procedures3)];
+const struct svc_version nfsd_acl_version3 = {
+ .vs_vers = 3,
+ .vs_nproc = 3,
+ .vs_proc = nfsd_acl_procedures3,
+ .vs_count = nfsd_acl_count3,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_xdrsize = NFS3_SVC_XDRSIZE,
};
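
The rewritten PROC() macro above swaps positional initializers (with their kxdrproc_t casts) for designated ones. A short sketch of why that is safer, using stand-in types rather than the kernel's own:

	/* Sketch: designated initializers name each field, so reordering the
	 * struct or omitting a field no longer silently shifts every later
	 * positional value. Types here are illustrative only. */
	#include <stddef.h>

	typedef int (*proc_fn)(void *rqstp);

	struct procedure {
		proc_fn  pc_func;
		size_t   pc_argsize;
		size_t   pc_ressize;
		unsigned pc_xdrressize;
	};

	static int proc_null(void *rqstp) { (void)rqstp; return 0; }

	#define PROC(name, argsz, ressz, respsize)	\
	{						\
		.pc_func       = proc_##name,		\
		.pc_argsize    = (argsz),		\
		.pc_ressize    = (ressz),		\
		.pc_xdrressize = (respsize),		\
	}

	static const struct procedure procs[] = {
		PROC(null, 0, 0, 1),
	};

	int main(void)
	{
		return procs[0].pc_func(0);
	}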
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 045c9081eabe..2cb56a0d6625 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -31,7 +31,7 @@ static int nfs3_ftypes[] = {
* NULL call.
*/
static __be32
-nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nfsd3_proc_null(struct svc_rqst *rqstp)
{
return nfs_ok;
}
@@ -40,9 +40,10 @@ nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
* Get a file's attributes
*/
static __be32
-nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
- struct nfsd3_attrstat *resp)
+nfsd3_proc_getattr(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: GETATTR(3) %s\n",
@@ -63,9 +64,10 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
* Set a file's attributes
*/
static __be32
-nfsd3_proc_setattr(struct svc_rqst *rqstp, struct nfsd3_sattrargs *argp,
- struct nfsd3_attrstat *resp)
+nfsd3_proc_setattr(struct svc_rqst *rqstp)
{
+ struct nfsd3_sattrargs *argp = rqstp->rq_argp;
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: SETATTR(3) %s\n",
@@ -81,9 +83,10 @@ nfsd3_proc_setattr(struct svc_rqst *rqstp, struct nfsd3_sattrargs *argp,
* Look up a path name component
*/
static __be32
-nfsd3_proc_lookup(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
- struct nfsd3_diropres *resp)
+nfsd3_proc_lookup(struct svc_rqst *rqstp)
{
+ struct nfsd3_diropargs *argp = rqstp->rq_argp;
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: LOOKUP(3) %s %.*s\n",
@@ -105,9 +108,10 @@ nfsd3_proc_lookup(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
* Check file access
*/
static __be32
-nfsd3_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
- struct nfsd3_accessres *resp)
+nfsd3_proc_access(struct svc_rqst *rqstp)
{
+ struct nfsd3_accessargs *argp = rqstp->rq_argp;
+ struct nfsd3_accessres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: ACCESS(3) %s 0x%x\n",
@@ -124,9 +128,10 @@ nfsd3_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
* Read a symlink.
*/
static __be32
-nfsd3_proc_readlink(struct svc_rqst *rqstp, struct nfsd3_readlinkargs *argp,
- struct nfsd3_readlinkres *resp)
+nfsd3_proc_readlink(struct svc_rqst *rqstp)
{
+ struct nfsd3_readlinkargs *argp = rqstp->rq_argp;
+ struct nfsd3_readlinkres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: READLINK(3) %s\n", SVCFH_fmt(&argp->fh));
@@ -142,9 +147,10 @@ nfsd3_proc_readlink(struct svc_rqst *rqstp, struct nfsd3_readlinkargs *argp,
* Read a portion of a file.
*/
static __be32
-nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
- struct nfsd3_readres *resp)
+nfsd3_proc_read(struct svc_rqst *rqstp)
{
+ struct nfsd3_readargs *argp = rqstp->rq_argp;
+ struct nfsd3_readres *resp = rqstp->rq_resp;
__be32 nfserr;
u32 max_blocksize = svc_max_payload(rqstp);
unsigned long cnt = min(argp->count, max_blocksize);
@@ -179,9 +185,10 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
* Write data to a file
*/
static __be32
-nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
- struct nfsd3_writeres *resp)
+nfsd3_proc_write(struct svc_rqst *rqstp)
{
+ struct nfsd3_writeargs *argp = rqstp->rq_argp;
+ struct nfsd3_writeres *resp = rqstp->rq_resp;
__be32 nfserr;
unsigned long cnt = argp->len;
@@ -206,9 +213,10 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
* first reports about SunOS compatibility problems start to pour in...
*/
static __be32
-nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
- struct nfsd3_diropres *resp)
+nfsd3_proc_create(struct svc_rqst *rqstp)
{
+ struct nfsd3_createargs *argp = rqstp->rq_argp;
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
svc_fh *dirfhp, *newfhp = NULL;
struct iattr *attr;
__be32 nfserr;
@@ -243,9 +251,10 @@ nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
* Make directory. This operation is not idempotent.
*/
static __be32
-nfsd3_proc_mkdir(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
- struct nfsd3_diropres *resp)
+nfsd3_proc_mkdir(struct svc_rqst *rqstp)
{
+ struct nfsd3_createargs *argp = rqstp->rq_argp;
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: MKDIR(3) %s %.*s\n",
@@ -263,9 +272,10 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
}
static __be32
-nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp,
- struct nfsd3_diropres *resp)
+nfsd3_proc_symlink(struct svc_rqst *rqstp)
{
+ struct nfsd3_symlinkargs *argp = rqstp->rq_argp;
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: SYMLINK(3) %s %.*s -> %.*s\n",
@@ -284,9 +294,10 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp,
* Make socket/fifo/device.
*/
static __be32
-nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp,
- struct nfsd3_diropres *resp)
+nfsd3_proc_mknod(struct svc_rqst *rqstp)
{
+ struct nfsd3_mknodargs *argp = rqstp->rq_argp;
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
__be32 nfserr;
int type;
dev_t rdev = 0;
@@ -321,9 +332,10 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp,
* Remove file/fifo/socket etc.
*/
static __be32
-nfsd3_proc_remove(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
- struct nfsd3_attrstat *resp)
+nfsd3_proc_remove(struct svc_rqst *rqstp)
{
+ struct nfsd3_diropargs *argp = rqstp->rq_argp;
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: REMOVE(3) %s %.*s\n",
@@ -342,9 +354,10 @@ nfsd3_proc_remove(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
* Remove a directory
*/
static __be32
-nfsd3_proc_rmdir(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
- struct nfsd3_attrstat *resp)
+nfsd3_proc_rmdir(struct svc_rqst *rqstp)
{
+ struct nfsd3_diropargs *argp = rqstp->rq_argp;
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: RMDIR(3) %s %.*s\n",
@@ -359,9 +372,10 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
}
static __be32
-nfsd3_proc_rename(struct svc_rqst *rqstp, struct nfsd3_renameargs *argp,
- struct nfsd3_renameres *resp)
+nfsd3_proc_rename(struct svc_rqst *rqstp)
{
+ struct nfsd3_renameargs *argp = rqstp->rq_argp;
+ struct nfsd3_renameres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: RENAME(3) %s %.*s ->\n",
@@ -381,9 +395,10 @@ nfsd3_proc_rename(struct svc_rqst *rqstp, struct nfsd3_renameargs *argp,
}
static __be32
-nfsd3_proc_link(struct svc_rqst *rqstp, struct nfsd3_linkargs *argp,
- struct nfsd3_linkres *resp)
+nfsd3_proc_link(struct svc_rqst *rqstp)
{
+ struct nfsd3_linkargs *argp = rqstp->rq_argp;
+ struct nfsd3_linkres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: LINK(3) %s ->\n",
@@ -404,9 +419,10 @@ nfsd3_proc_link(struct svc_rqst *rqstp, struct nfsd3_linkargs *argp,
* Read a portion of a directory.
*/
static __be32
-nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
- struct nfsd3_readdirres *resp)
+nfsd3_proc_readdir(struct svc_rqst *rqstp)
{
+ struct nfsd3_readdirargs *argp = rqstp->rq_argp;
+ struct nfsd3_readdirres *resp = rqstp->rq_resp;
__be32 nfserr;
int count;
@@ -440,9 +456,10 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
* For now, we choose to ignore the dircount parameter.
*/
static __be32
-nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
- struct nfsd3_readdirres *resp)
+nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
{
+ struct nfsd3_readdirargs *argp = rqstp->rq_argp;
+ struct nfsd3_readdirres *resp = rqstp->rq_resp;
__be32 nfserr;
int count = 0;
loff_t offset;
@@ -507,9 +524,10 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
* Get file system stats
*/
static __be32
-nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
- struct nfsd3_fsstatres *resp)
+nfsd3_proc_fsstat(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd3_fsstatres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: FSSTAT(3) %s\n",
@@ -524,9 +542,10 @@ nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
* Get file system info
*/
static __be32
-nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
- struct nfsd3_fsinfores *resp)
+nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd3_fsinfores *resp = rqstp->rq_resp;
__be32 nfserr;
u32 max_blocksize = svc_max_payload(rqstp);
@@ -567,9 +586,10 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
* Get pathconf info for the specified file
*/
static __be32
-nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
- struct nfsd3_pathconfres *resp)
+nfsd3_proc_pathconf(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd3_pathconfres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: PATHCONF(3) %s\n",
@@ -610,9 +630,10 @@ nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
* Commit a file (range) to stable storage.
*/
static __be32
-nfsd3_proc_commit(struct svc_rqst * rqstp, struct nfsd3_commitargs *argp,
- struct nfsd3_commitres *resp)
+nfsd3_proc_commit(struct svc_rqst *rqstp)
{
+ struct nfsd3_commitargs *argp = rqstp->rq_argp;
+ struct nfsd3_commitres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: COMMIT(3) %s %u@%Lu\n",
@@ -647,233 +668,221 @@ nfsd3_proc_commit(struct svc_rqst * rqstp, struct nfsd3_commitargs *argp,
#define nfsd3_voidres nfsd3_voidargs
struct nfsd3_voidargs { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd3_proc_##name, \
- (kxdrproc_t) nfs3svc_decode_##argt##args, \
- (kxdrproc_t) nfs3svc_encode_##rest##res, \
- (kxdrproc_t) nfs3svc_release_##relt, \
- sizeof(struct nfsd3_##argt##args), \
- sizeof(struct nfsd3_##rest##res), \
- 0, \
- cache, \
- respsize, \
- }
-
#define ST 1 /* status */
#define FH 17 /* filehandle with length */
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
#define WC (7+pAT) /* WCC attributes */
-static struct svc_procedure nfsd_procedures3[22] = {
+static const struct svc_procedure nfsd_procedures3[22] = {
[NFS3PROC_NULL] = {
- .pc_func = (svc_procfunc) nfsd3_proc_null,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_voidres,
+ .pc_func = nfsd3_proc_null,
+ .pc_encode = nfs3svc_encode_voidres,
.pc_argsize = sizeof(struct nfsd3_voidargs),
.pc_ressize = sizeof(struct nfsd3_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
},
[NFS3PROC_GETATTR] = {
- .pc_func = (svc_procfunc) nfsd3_proc_getattr,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_attrstatres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_getattr,
+ .pc_decode = nfs3svc_decode_fhandleargs,
+ .pc_encode = nfs3svc_encode_attrstatres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_attrstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
},
[NFS3PROC_SETATTR] = {
- .pc_func = (svc_procfunc) nfsd3_proc_setattr,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_sattrargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_setattr,
+ .pc_decode = nfs3svc_decode_sattrargs,
+ .pc_encode = nfs3svc_encode_wccstatres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_sattrargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
},
[NFS3PROC_LOOKUP] = {
- .pc_func = (svc_procfunc) nfsd3_proc_lookup,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_diropres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_lookup,
+ .pc_decode = nfs3svc_decode_diropargs,
+ .pc_encode = nfs3svc_encode_diropres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+pAT+pAT,
},
[NFS3PROC_ACCESS] = {
- .pc_func = (svc_procfunc) nfsd3_proc_access,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_accessargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_accessres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_access,
+ .pc_decode = nfs3svc_decode_accessargs,
+ .pc_encode = nfs3svc_encode_accessres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_accessargs),
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1,
},
[NFS3PROC_READLINK] = {
- .pc_func = (svc_procfunc) nfsd3_proc_readlink,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_readlinkargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_readlinkres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_readlink,
+ .pc_decode = nfs3svc_decode_readlinkargs,
+ .pc_encode = nfs3svc_encode_readlinkres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readlinkargs),
.pc_ressize = sizeof(struct nfsd3_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
},
[NFS3PROC_READ] = {
- .pc_func = (svc_procfunc) nfsd3_proc_read,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_readargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_readres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_read,
+ .pc_decode = nfs3svc_decode_readargs,
+ .pc_encode = nfs3svc_encode_readres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readargs),
.pc_ressize = sizeof(struct nfsd3_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
},
[NFS3PROC_WRITE] = {
- .pc_func = (svc_procfunc) nfsd3_proc_write,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_writeargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_writeres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_write,
+ .pc_decode = nfs3svc_decode_writeargs,
+ .pc_encode = nfs3svc_encode_writeres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_writeargs),
.pc_ressize = sizeof(struct nfsd3_writeres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+4,
},
[NFS3PROC_CREATE] = {
- .pc_func = (svc_procfunc) nfsd3_proc_create,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_createargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_create,
+ .pc_decode = nfs3svc_decode_createargs,
+ .pc_encode = nfs3svc_encode_createres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_createargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
},
[NFS3PROC_MKDIR] = {
- .pc_func = (svc_procfunc) nfsd3_proc_mkdir,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_mkdirargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_mkdir,
+ .pc_decode = nfs3svc_decode_mkdirargs,
+ .pc_encode = nfs3svc_encode_createres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_mkdirargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
},
[NFS3PROC_SYMLINK] = {
- .pc_func = (svc_procfunc) nfsd3_proc_symlink,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_symlinkargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_symlink,
+ .pc_decode = nfs3svc_decode_symlinkargs,
+ .pc_encode = nfs3svc_encode_createres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_symlinkargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
},
[NFS3PROC_MKNOD] = {
- .pc_func = (svc_procfunc) nfsd3_proc_mknod,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_mknodargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_mknod,
+ .pc_decode = nfs3svc_decode_mknodargs,
+ .pc_encode = nfs3svc_encode_createres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_mknodargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
},
[NFS3PROC_REMOVE] = {
- .pc_func = (svc_procfunc) nfsd3_proc_remove,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_remove,
+ .pc_decode = nfs3svc_decode_diropargs,
+ .pc_encode = nfs3svc_encode_wccstatres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
},
[NFS3PROC_RMDIR] = {
- .pc_func = (svc_procfunc) nfsd3_proc_rmdir,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_rmdir,
+ .pc_decode = nfs3svc_decode_diropargs,
+ .pc_encode = nfs3svc_encode_wccstatres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
},
[NFS3PROC_RENAME] = {
- .pc_func = (svc_procfunc) nfsd3_proc_rename,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_renameargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_renameres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_rename,
+ .pc_decode = nfs3svc_decode_renameargs,
+ .pc_encode = nfs3svc_encode_renameres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_renameargs),
.pc_ressize = sizeof(struct nfsd3_renameres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+WC,
},
[NFS3PROC_LINK] = {
- .pc_func = (svc_procfunc) nfsd3_proc_link,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_linkargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_linkres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_func = nfsd3_proc_link,
+ .pc_decode = nfs3svc_decode_linkargs,
+ .pc_encode = nfs3svc_encode_linkres,
+ .pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_linkargs),
.pc_ressize = sizeof(struct nfsd3_linkres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+pAT+WC,
},
[NFS3PROC_READDIR] = {
- .pc_func = (svc_procfunc) nfsd3_proc_readdir,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_readdir,
+ .pc_decode = nfs3svc_decode_readdirargs,
+ .pc_encode = nfs3svc_encode_readdirres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readdirargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
},
[NFS3PROC_READDIRPLUS] = {
- .pc_func = (svc_procfunc) nfsd3_proc_readdirplus,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirplusargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_readdirplus,
+ .pc_decode = nfs3svc_decode_readdirplusargs,
+ .pc_encode = nfs3svc_encode_readdirres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readdirplusargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
},
[NFS3PROC_FSSTAT] = {
- .pc_func = (svc_procfunc) nfsd3_proc_fsstat,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_fsstatres,
+ .pc_func = nfsd3_proc_fsstat,
+ .pc_decode = nfs3svc_decode_fhandleargs,
+ .pc_encode = nfs3svc_encode_fsstatres,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_fsstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+2*6+1,
},
[NFS3PROC_FSINFO] = {
- .pc_func = (svc_procfunc) nfsd3_proc_fsinfo,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_fsinfores,
+ .pc_func = nfsd3_proc_fsinfo,
+ .pc_decode = nfs3svc_decode_fhandleargs,
+ .pc_encode = nfs3svc_encode_fsinfores,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_fsinfores),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+12,
},
[NFS3PROC_PATHCONF] = {
- .pc_func = (svc_procfunc) nfsd3_proc_pathconf,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_pathconfres,
+ .pc_func = nfsd3_proc_pathconf,
+ .pc_decode = nfs3svc_decode_fhandleargs,
+ .pc_encode = nfs3svc_encode_pathconfres,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_pathconfres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+6,
},
[NFS3PROC_COMMIT] = {
- .pc_func = (svc_procfunc) nfsd3_proc_commit,
- .pc_decode = (kxdrproc_t) nfs3svc_decode_commitargs,
- .pc_encode = (kxdrproc_t) nfs3svc_encode_commitres,
- .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_func = nfsd3_proc_commit,
+ .pc_decode = nfs3svc_decode_commitargs,
+ .pc_encode = nfs3svc_encode_commitres,
+ .pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_commitargs),
.pc_ressize = sizeof(struct nfsd3_commitres),
.pc_cachetype = RC_NOCACHE,
@@ -881,10 +890,12 @@ static struct svc_procedure nfsd_procedures3[22] = {
},
};
-struct svc_version nfsd_version3 = {
- .vs_vers = 3,
- .vs_nproc = 22,
- .vs_proc = nfsd_procedures3,
- .vs_dispatch = nfsd_dispatch,
- .vs_xdrsize = NFS3_SVC_XDRSIZE,
+static unsigned int nfsd_count3[ARRAY_SIZE(nfsd_procedures3)];
+const struct svc_version nfsd_version3 = {
+ .vs_vers = 3,
+ .vs_nproc = 22,
+ .vs_proc = nfsd_procedures3,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_count = nfsd_count3,
+ .vs_xdrsize = NFS3_SVC_XDRSIZE,
};
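
Each version struct now carries a vs_count array, declared with ARRAY_SIZE so it can never drift out of step with the procedure table it counts. A self-contained sketch of that idiom (names are illustrative, not the kernel's):

	/* Sketch of the per-procedure call-count arrays added here: one
	 * unsigned counter per table entry, sized from the table itself. */
	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const char *procedures[] = { "NULL", "GETATTR", "SETATTR" };
	static unsigned int counts[ARRAY_SIZE(procedures)];

	static void dispatch(unsigned int procnum)
	{
		counts[procnum]++;      /* statistics live next to the table */
	}

	int main(void)
	{
		dispatch(1);
		dispatch(1);
		dispatch(2);
		for (unsigned int i = 0; i < ARRAY_SIZE(procedures); i++)
			printf("%-8s %u\n", procedures[i], counts[i]);
		return 0;
	}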
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 452334694a5d..bf444b664011 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -260,7 +260,7 @@ void fill_post_wcc(struct svc_fh *fhp)
printk("nfsd: inode locked twice during operation.\n");
err = fh_getattr(fhp, &fhp->fh_post_attr);
- fhp->fh_post_change = d_inode(fhp->fh_dentry)->i_version;
+ fhp->fh_post_change = nfsd4_change_attribute(d_inode(fhp->fh_dentry));
if (err) {
fhp->fh_post_saved = false;
/* Grab the ctime anyway - set_change_info might use it */
@@ -273,8 +273,10 @@ void fill_post_wcc(struct svc_fh *fhp)
* XDR decode functions
*/
int
-nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args)
+nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_fhandle *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -282,9 +284,10 @@ nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *a
}
int
-nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_sattrargs *args)
+nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_sattrargs *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -300,9 +303,10 @@ nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_diropargs *args)
+nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_diropargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
@@ -311,9 +315,10 @@ nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_accessargs *args)
+nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_accessargs *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -323,9 +328,9 @@ nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readargs *args)
+nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readargs *args = rqstp->rq_argp;
unsigned int len;
int v;
u32 max_blocksize = svc_max_payload(rqstp);
@@ -353,9 +358,9 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_writeargs *args)
+nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_writeargs *args = rqstp->rq_argp;
unsigned int len, v, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
struct kvec *head = rqstp->rq_arg.head;
@@ -413,9 +418,10 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_createargs *args)
+nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_createargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
@@ -435,10 +441,12 @@ nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
return xdr_argsize_check(rqstp, p);
}
+
int
-nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_createargs *args)
+nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_createargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->fh)) ||
!(p = decode_filename(p, &args->name, &args->len)))
return 0;
@@ -448,9 +456,9 @@ nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_symlinkargs *args)
+nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_symlinkargs *args = rqstp->rq_argp;
unsigned int len, avail;
char *old, *new;
struct kvec *vec;
@@ -500,9 +508,10 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_mknodargs *args)
+nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_mknodargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
@@ -522,9 +531,10 @@ nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_renameargs *args)
+nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_renameargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
|| !(p = decode_fh(p, &args->tfh))
@@ -535,9 +545,10 @@ nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readlinkargs *args)
+nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readlinkargs *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -547,9 +558,10 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_linkargs *args)
+nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_linkargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_fh(p, &args->tfh))
|| !(p = decode_filename(p, &args->tname, &args->tlen)))
@@ -559,9 +571,9 @@ nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readdirargs *args)
+nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readdirargs *args = rqstp->rq_argp;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -576,9 +588,9 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readdirargs *args)
+nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readdirargs *args = rqstp->rq_argp;
int len;
u32 max_blocksize = svc_max_payload(rqstp);
@@ -602,9 +614,9 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_commitargs *args)
+nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_commitargs *args = rqstp->rq_argp;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -622,16 +634,17 @@ nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p,
* will work properly.
*/
int
-nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
/* GETATTR */
int
-nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_attrstat *resp)
+nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
+
if (resp->status == 0) {
lease_get_mtime(d_inode(resp->fh.fh_dentry),
&resp->stat.mtime);
@@ -642,18 +655,20 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
/* SETATTR, REMOVE, RMDIR */
int
-nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_attrstat *resp)
+nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
+
p = encode_wcc_data(rqstp, p, &resp->fh);
return xdr_ressize_check(rqstp, p);
}
/* LOOKUP */
int
-nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_diropres *resp)
+nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
+
if (resp->status == 0) {
p = encode_fh(p, &resp->fh);
p = encode_post_op_attr(rqstp, p, &resp->fh);
@@ -664,9 +679,10 @@ nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
/* ACCESS */
int
-nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_accessres *resp)
+nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_accessres *resp = rqstp->rq_resp;
+
p = encode_post_op_attr(rqstp, p, &resp->fh);
if (resp->status == 0)
*p++ = htonl(resp->access);
@@ -675,9 +691,10 @@ nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
/* READLINK */
int
-nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readlinkres *resp)
+nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readlinkres *resp = rqstp->rq_resp;
+
p = encode_post_op_attr(rqstp, p, &resp->fh);
if (resp->status == 0) {
*p++ = htonl(resp->len);
@@ -696,9 +713,10 @@ nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
/* READ */
int
-nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readres *resp)
+nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readres *resp = rqstp->rq_resp;
+
p = encode_post_op_attr(rqstp, p, &resp->fh);
if (resp->status == 0) {
*p++ = htonl(resp->count);
@@ -720,9 +738,9 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
/* WRITE */
int
-nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_writeres *resp)
+nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_writeres *resp = rqstp->rq_resp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
p = encode_wcc_data(rqstp, p, &resp->fh);
@@ -737,9 +755,10 @@ nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p,
/* CREATE, MKDIR, SYMLINK, MKNOD */
int
-nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_diropres *resp)
+nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_diropres *resp = rqstp->rq_resp;
+
if (resp->status == 0) {
*p++ = xdr_one;
p = encode_fh(p, &resp->fh);
@@ -751,9 +770,10 @@ nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p,
/* RENAME */
int
-nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_renameres *resp)
+nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_renameres *resp = rqstp->rq_resp;
+
p = encode_wcc_data(rqstp, p, &resp->ffh);
p = encode_wcc_data(rqstp, p, &resp->tfh);
return xdr_ressize_check(rqstp, p);
@@ -761,9 +781,10 @@ nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p,
/* LINK */
int
-nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_linkres *resp)
+nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_linkres *resp = rqstp->rq_resp;
+
p = encode_post_op_attr(rqstp, p, &resp->fh);
p = encode_wcc_data(rqstp, p, &resp->tfh);
return xdr_ressize_check(rqstp, p);
@@ -771,9 +792,10 @@ nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p,
/* READDIR */
int
-nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_readdirres *resp)
+nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_readdirres *resp = rqstp->rq_resp;
+
p = encode_post_op_attr(rqstp, p, &resp->fh);
if (resp->status == 0) {
@@ -1021,9 +1043,9 @@ nfs3svc_encode_entry_plus(void *cd, const char *name,
/* FSSTAT */
int
-nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_fsstatres *resp)
+nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_fsstatres *resp = rqstp->rq_resp;
struct kstatfs *s = &resp->stats;
u64 bs = s->f_bsize;
@@ -1043,9 +1065,10 @@ nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p,
/* FSINFO */
int
-nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_fsinfores *resp)
+nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_fsinfores *resp = rqstp->rq_resp;
+
*p++ = xdr_zero; /* no post_op_attr */
if (resp->status == 0) {
@@ -1067,9 +1090,10 @@ nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p,
/* PATHCONF */
int
-nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_pathconfres *resp)
+nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_pathconfres *resp = rqstp->rq_resp;
+
*p++ = xdr_zero; /* no post_op_attr */
if (resp->status == 0) {
@@ -1086,9 +1110,9 @@ nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p,
/* COMMIT */
int
-nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_commitres *resp)
+nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd3_commitres *resp = rqstp->rq_resp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
p = encode_wcc_data(rqstp, p, &resp->fh);
@@ -1103,19 +1127,19 @@ nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p,
/*
* XDR release functions
*/
-int
-nfs3svc_release_fhandle(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_attrstat *resp)
+void
+nfs3svc_release_fhandle(struct svc_rqst *rqstp)
{
+ struct nfsd3_attrstat *resp = rqstp->rq_resp;
+
fh_put(&resp->fh);
- return 1;
}
-int
-nfs3svc_release_fhandle2(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd3_fhandle_pair *resp)
+void
+nfs3svc_release_fhandle2(struct svc_rqst *rqstp)
{
+ struct nfsd3_fhandle_pair *resp = rqstp->rq_resp;
+
fh_put(&resp->fh1);
fh_put(&resp->fh2);
- return 1;
}
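
The release hooks above always returned 1 and the caller ignored it, so the new signature returns void and takes only the request. A tiny sketch of the resulting shape (stand-in types, not the kernel's):

	/* Sketch: a void release hook that frees per-request resources. */
	#include <stdlib.h>

	struct request { void *rq_resp; };
	struct fh_res  { char *buf; };

	typedef void (*release_fn)(struct request *rqstp);

	static void release_fhandle(struct request *rqstp)
	{
		struct fh_res *resp = rqstp->rq_resp;

		free(resp->buf);        /* drop per-request resources */
		resp->buf = NULL;
	}

	int main(void)
	{
		struct fh_res res = { .buf = malloc(16) };
		struct request rq = { .rq_resp = &res };
		release_fn release = release_fhandle;   /* no cast needed now */

		release(&rq);
		return 0;
	}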
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 0274db6e65d0..49b0a9e7ff18 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -468,7 +468,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr,
* NB: Without this zero space reservation, callbacks over krb5p fail
*/
static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
- void *__unused)
+ const void *__unused)
{
xdr_reserve_space(xdr, 0);
}
@@ -477,8 +477,9 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct nfsd4_callback *cb)
+ const void *data)
{
+ const struct nfsd4_callback *cb = data;
const struct nfs4_delegation *dp = cb_to_delegation(cb);
struct nfs4_cb_compound_hdr hdr = {
.ident = cb->cb_clp->cl_cb_ident,
@@ -512,8 +513,9 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
*/
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfsd4_callback *cb)
+ void *data)
{
+ struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
@@ -585,8 +587,9 @@ static void encode_cb_layout4args(struct xdr_stream *xdr,
static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfsd4_callback *cb)
+ const void *data)
{
+ const struct nfsd4_callback *cb = data;
const struct nfs4_layout_stateid *ls =
container_of(cb, struct nfs4_layout_stateid, ls_recall);
struct nfs4_cb_compound_hdr hdr = {
@@ -602,8 +605,9 @@ static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfsd4_callback *cb)
+ void *data)
{
+ struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
@@ -631,8 +635,9 @@ static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so
static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfsd4_callback *cb)
+ const void *data)
{
+ const struct nfsd4_callback *cb = data;
const struct nfsd4_blocked_lock *nbl =
container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
@@ -659,8 +664,9 @@ static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct nfsd4_callback *cb)
+ void *data)
{
+ struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
@@ -682,15 +688,15 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
#define PROC(proc, call, argtype, restype) \
[NFSPROC4_CLNT_##proc] = { \
.p_proc = NFSPROC4_CB_##call, \
- .p_encode = (kxdreproc_t)nfs4_xdr_enc_##argtype, \
- .p_decode = (kxdrdproc_t)nfs4_xdr_dec_##restype, \
+ .p_encode = nfs4_xdr_enc_##argtype, \
+ .p_decode = nfs4_xdr_dec_##restype, \
.p_arglen = NFS4_enc_##argtype##_sz, \
.p_replen = NFS4_dec_##restype##_sz, \
.p_statidx = NFSPROC4_CB_##call, \
.p_name = #proc, \
}
-static struct rpc_procinfo nfs4_cb_procedures[] = {
+static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NULL, NULL, cb_null, cb_null),
PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
#ifdef CONFIG_NFSD_PNFS
@@ -699,7 +705,8 @@ static struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
};
-static struct rpc_version nfs_cb_version4 = {
+static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
+static const struct rpc_version nfs_cb_version4 = {
/*
* Note on the callback rpc program version number: despite language in rfc
* 5661 section 18.36.3 requiring servers to use 4 in this field, the
@@ -709,11 +716,12 @@ static struct rpc_version nfs_cb_version4 = {
*/
.number = 1,
.nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
- .procs = nfs4_cb_procedures
+ .procs = nfs4_cb_procedures,
+ .counts = nfs4_cb_counts,
};
-static const struct rpc_version *nfs_cb_version[] = {
- &nfs_cb_version4,
+static const struct rpc_version *nfs_cb_version[2] = {
+ [1] = &nfs_cb_version4,
};
static const struct rpc_program cb_program;
@@ -787,7 +795,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
.saddress = (struct sockaddr *) &conn->cb_saddr,
.timeout = &timeparms,
.program = &cb_program,
- .version = 0,
+ .version = 1,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
};
struct rpc_clnt *client;
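
The callback version table above becomes sparse: the version now sits at index 1 so the array index matches its .number, and the client is created with .version = 1 rather than 0. A sketch of that indexing change (names are illustrative stand-ins):

	/* Sketch: version slot == version number, slot 0 intentionally NULL. */
	#include <stdio.h>

	struct rpc_version_sketch {
		unsigned int number;    /* version number on the wire */
	};

	static const struct rpc_version_sketch cb_version4 = { .number = 1 };

	static const struct rpc_version_sketch *cb_versions[2] = {
		[1] = &cb_version4,
	};

	int main(void)
	{
		unsigned int version = 1;   /* was 0: a raw array position */
		printf("wire version %u\n", cb_versions[version]->number);
		return 0;
	}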
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index dadb3bf305b2..d27e75ad25e3 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -344,8 +344,9 @@ copy_clientid(clientid_t *clid, struct nfsd4_session *session)
static __be32
nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_open *open)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_open *open = &u->open;
__be32 status;
struct svc_fh *resfh = NULL;
struct net *net = SVC_NET(rqstp);
@@ -467,14 +468,14 @@ out:
*/
static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_op *op)
{
- struct nfsd4_open *open = (struct nfsd4_open *)&op->u;
+ struct nfsd4_open *open = &op->u.open;
if (!seqid_mutating_err(ntohl(op->status)))
return op->status;
if (nfsd4_has_session(cstate))
return op->status;
open->op_xdr_error = op->status;
- return nfsd4_open(rqstp, cstate, open);
+ return nfsd4_open(rqstp, cstate, &op->u);
}
/*
@@ -482,19 +483,21 @@ static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_stat
*/
static __be32
nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct svc_fh **getfh)
+ union nfsd4_op_u *u)
{
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
- *getfh = &cstate->current_fh;
+ u->getfh = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_putfh *putfh)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_putfh *putfh = &u->putfh;
+
fh_put(&cstate->current_fh);
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
@@ -504,7 +507,7 @@ nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- void *arg)
+ union nfsd4_op_u *u)
{
__be32 status;
@@ -515,7 +518,7 @@ nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- void *arg)
+ union nfsd4_op_u *u)
{
if (!cstate->save_fh.fh_dentry)
return nfserr_restorefh;
@@ -530,7 +533,7 @@ nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- void *arg)
+ union nfsd4_op_u *u)
{
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
@@ -548,8 +551,10 @@ nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
static __be32
nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_access *access)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_access *access = &u->access;
+
if (access->ac_req_access & ~NFS3_ACCESS_FULL)
return nfserr_inval;
@@ -574,8 +579,10 @@ static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_commit *commit)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_commit *commit = &u->commit;
+
gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
@@ -583,8 +590,9 @@ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_create *create)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_create *create = &u->create;
struct svc_fh resfh;
__be32 status;
dev_t rdev;
@@ -670,8 +678,9 @@ out:
static __be32
nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_getattr *getattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_getattr *getattr = &u->getattr;
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
@@ -691,8 +700,9 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_link *link)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_link *link = &u->link;
__be32 status = nfserr_nofilehandle;
if (!cstate->save_fh.fh_dentry)
@@ -723,24 +733,25 @@ static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
static __be32
nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- void *arg)
+ union nfsd4_op_u *u)
{
return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
}
static __be32
nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_lookup *lookup)
+ union nfsd4_op_u *u)
{
return nfsd_lookup(rqstp, &cstate->current_fh,
- lookup->lo_name, lookup->lo_len,
+ u->lookup.lo_name, u->lookup.lo_len,
&cstate->current_fh);
}
static __be32
nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_read *read)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_read *read = &u->read;
__be32 status;
read->rd_filp = NULL;
@@ -775,8 +786,9 @@ out:
static __be32
nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_readdir *readdir)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_readdir *readdir = &u->readdir;
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
@@ -800,17 +812,18 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_readlink *readlink)
+ union nfsd4_op_u *u)
{
- readlink->rl_rqstp = rqstp;
- readlink->rl_fhp = &cstate->current_fh;
+ u->readlink.rl_rqstp = rqstp;
+ u->readlink.rl_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_remove *remove)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_remove *remove = &u->remove;
__be32 status;
if (opens_in_grace(SVC_NET(rqstp)))
@@ -826,8 +839,9 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_rename *rename)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_rename *rename = &u->rename;
__be32 status = nfserr_nofilehandle;
if (!cstate->save_fh.fh_dentry)
@@ -847,8 +861,9 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_secinfo *secinfo)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_secinfo *secinfo = &u->secinfo;
struct svc_export *exp;
struct dentry *dentry;
__be32 err;
@@ -876,11 +891,11 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_secinfo_no_name *sin)
+ union nfsd4_op_u *u)
{
__be32 err;
- switch (sin->sin_style) {
+ switch (u->secinfo_no_name.sin_style) {
case NFS4_SECINFO_STYLE4_CURRENT_FH:
break;
case NFS4_SECINFO_STYLE4_PARENT:
@@ -892,15 +907,16 @@ nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstat
return nfserr_inval;
}
- sin->sin_exp = exp_get(cstate->current_fh.fh_export);
+ u->secinfo_no_name.sin_exp = exp_get(cstate->current_fh.fh_export);
fh_put(&cstate->current_fh);
return nfs_ok;
}
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_setattr *setattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setattr *setattr = &u->setattr;
__be32 status = nfs_ok;
int err;
@@ -960,8 +976,9 @@ static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_write *write)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_write *write = &u->write;
stateid_t *stateid = &write->wr_stateid;
struct file *filp = NULL;
__be32 status = nfs_ok;
@@ -1034,8 +1051,9 @@ out_put_src:
static __be32
nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_clone *clone)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_clone *clone = &u->clone;
struct file *src, *dst;
__be32 status;
@@ -1055,8 +1073,9 @@ out:
static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_copy *copy)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_copy *copy = &u->copy;
struct file *src, *dst;
__be32 status;
ssize_t bytes;
@@ -1111,23 +1130,24 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_fallocate *fallocate)
+ union nfsd4_op_u *u)
{
- return nfsd4_fallocate(rqstp, cstate, fallocate, 0);
+ return nfsd4_fallocate(rqstp, cstate, &u->allocate, 0);
}
static __be32
nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_fallocate *fallocate)
+ union nfsd4_op_u *u)
{
- return nfsd4_fallocate(rqstp, cstate, fallocate,
+ return nfsd4_fallocate(rqstp, cstate, &u->deallocate,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
}
static __be32
nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_seek *seek)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_seek *seek = &u->seek;
int whence;
__be32 status;
struct file *file;
@@ -1232,21 +1252,21 @@ out_kfree:
static __be32
nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_verify *verify)
+ union nfsd4_op_u *u)
{
__be32 status;
- status = _nfsd4_verify(rqstp, cstate, verify);
+ status = _nfsd4_verify(rqstp, cstate, &u->verify);
return status == nfserr_not_same ? nfs_ok : status;
}
static __be32
nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_verify *verify)
+ union nfsd4_op_u *u)
{
__be32 status;
- status = _nfsd4_verify(rqstp, cstate, verify);
+ status = _nfsd4_verify(rqstp, cstate, &u->nverify);
return status == nfserr_same ? nfs_ok : status;
}
@@ -1271,9 +1291,9 @@ nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
static __be32
nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_getdeviceinfo *gdp)
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_getdeviceinfo *gdp = &u->getdeviceinfo;
const struct nfsd4_layout_ops *ops;
struct nfsd4_deviceid_map *map;
struct svc_export *exp;
@@ -1317,9 +1337,9 @@ out:
static __be32
nfsd4_layoutget(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_layoutget *lgp)
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_layoutget *lgp = &u->layoutget;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
@@ -1397,9 +1417,9 @@ out:
static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_layoutcommit *lcp)
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
@@ -1461,9 +1481,9 @@ out:
static __be32
nfsd4_layoutreturn(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_layoutreturn *lrp)
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
struct svc_fh *current_fh = &cstate->current_fh;
__be32 nfserr;
@@ -1510,7 +1530,7 @@ out:
* NULL call.
*/
static __be32
-nfsd4_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nfsd4_proc_null(struct svc_rqst *rqstp)
{
return nfs_ok;
}
@@ -1521,12 +1541,6 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
nfsdstats.nfs4_opcount[opnum]++;
}
-typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
- void *);
-typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
-typedef void(*stateid_setter)(struct nfsd4_compound_state *, void *);
-typedef void(*stateid_getter)(struct nfsd4_compound_state *, void *);
-
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
@@ -1558,16 +1572,19 @@ enum nfsd4_op_flags {
};
struct nfsd4_operation {
- nfsd4op_func op_func;
+ __be32 (*op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
u32 op_flags;
char *op_name;
/* Try to get response size before operation */
- nfsd4op_rsize op_rsize_bop;
- stateid_getter op_get_currentstateid;
- stateid_setter op_set_currentstateid;
+ u32 (*op_rsize_bop)(struct svc_rqst *, struct nfsd4_op *);
+ void (*op_get_currentstateid)(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
+ void (*op_set_currentstateid)(struct nfsd4_compound_state *,
+ union nfsd4_op_u *);
};
-static struct nfsd4_operation nfsd4_ops[];
+static const struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
@@ -1604,7 +1621,7 @@ static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
return nfs_ok;
}
-static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
+static inline const struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
{
return &nfsd4_ops[op->opnum];
}
@@ -1622,10 +1639,9 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
struct nfsd4_op *next = &argp->ops[resp->opcnt];
- struct nfsd4_operation *thisd;
- struct nfsd4_operation *nextd;
+ const struct nfsd4_operation *thisd = OPDESC(this);
+ const struct nfsd4_operation *nextd;
- thisd = OPDESC(this);
/*
* Most ops check wrongsec on their own; only the putfh-like ops
* have special rules.
@@ -1673,12 +1689,12 @@ static void svcxdr_init_encode(struct svc_rqst *rqstp,
* COMPOUND call.
*/
static __be32
-nfsd4_proc_compound(struct svc_rqst *rqstp,
- struct nfsd4_compoundargs *args,
- struct nfsd4_compoundres *resp)
+nfsd4_proc_compound(struct svc_rqst *rqstp)
{
+ struct nfsd4_compoundargs *args = rqstp->rq_argp;
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_op *op;
- struct nfsd4_operation *opdesc;
+ const struct nfsd4_operation *opdesc;
struct nfsd4_compound_state *cstate = &resp->cstate;
struct svc_fh *current_fh = &cstate->current_fh;
struct svc_fh *save_fh = &cstate->save_fh;
@@ -2091,360 +2107,360 @@ static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
return (op_encode_hdr_size + 3) * sizeof(__be32);
}
-static struct nfsd4_operation nfsd4_ops[] = {
+static const struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
- .op_func = (nfsd4op_func)nfsd4_access,
+ .op_func = nfsd4_access,
.op_name = "OP_ACCESS",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_access_rsize,
+ .op_rsize_bop = nfsd4_access_rsize,
},
[OP_CLOSE] = {
- .op_func = (nfsd4op_func)nfsd4_close,
+ .op_func = nfsd4_close,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
- .op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
+ .op_rsize_bop = nfsd4_status_stateid_rsize,
+ .op_get_currentstateid = nfsd4_get_closestateid,
+ .op_set_currentstateid = nfsd4_set_closestateid,
},
[OP_COMMIT] = {
- .op_func = (nfsd4op_func)nfsd4_commit,
+ .op_func = nfsd4_commit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COMMIT",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_commit_rsize,
+ .op_rsize_bop = nfsd4_commit_rsize,
},
[OP_CREATE] = {
- .op_func = (nfsd4op_func)nfsd4_create,
+ .op_func = nfsd4_create,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
.op_name = "OP_CREATE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
+ .op_rsize_bop = nfsd4_create_rsize,
},
[OP_DELEGRETURN] = {
- .op_func = (nfsd4op_func)nfsd4_delegreturn,
+ .op_func = nfsd4_delegreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
.op_rsize_bop = nfsd4_only_status_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_delegreturnstateid,
+ .op_get_currentstateid = nfsd4_get_delegreturnstateid,
},
[OP_GETATTR] = {
- .op_func = (nfsd4op_func)nfsd4_getattr,
+ .op_func = nfsd4_getattr,
.op_flags = ALLOWED_ON_ABSENT_FS,
.op_rsize_bop = nfsd4_getattr_rsize,
.op_name = "OP_GETATTR",
},
[OP_GETFH] = {
- .op_func = (nfsd4op_func)nfsd4_getfh,
+ .op_func = nfsd4_getfh,
.op_name = "OP_GETFH",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_getfh_rsize,
+ .op_rsize_bop = nfsd4_getfh_rsize,
},
[OP_LINK] = {
- .op_func = (nfsd4op_func)nfsd4_link,
+ .op_func = nfsd4_link,
.op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
| OP_CACHEME,
.op_name = "OP_LINK",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_link_rsize,
+ .op_rsize_bop = nfsd4_link_rsize,
},
[OP_LOCK] = {
- .op_func = (nfsd4op_func)nfsd4_lock,
+ .op_func = nfsd4_lock,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCK",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
- .op_set_currentstateid = (stateid_setter)nfsd4_set_lockstateid,
+ .op_rsize_bop = nfsd4_lock_rsize,
+ .op_set_currentstateid = nfsd4_set_lockstateid,
},
[OP_LOCKT] = {
- .op_func = (nfsd4op_func)nfsd4_lockt,
+ .op_func = nfsd4_lockt,
.op_name = "OP_LOCKT",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
+ .op_rsize_bop = nfsd4_lock_rsize,
},
[OP_LOCKU] = {
- .op_func = (nfsd4op_func)nfsd4_locku,
+ .op_func = nfsd4_locku,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_lockustateid,
+ .op_rsize_bop = nfsd4_status_stateid_rsize,
+ .op_get_currentstateid = nfsd4_get_lockustateid,
},
[OP_LOOKUP] = {
- .op_func = (nfsd4op_func)nfsd4_lookup,
+ .op_func = nfsd4_lookup,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUP",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_LOOKUPP] = {
- .op_func = (nfsd4op_func)nfsd4_lookupp,
+ .op_func = nfsd4_lookupp,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUPP",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_NVERIFY] = {
- .op_func = (nfsd4op_func)nfsd4_nverify,
+ .op_func = nfsd4_nverify,
.op_name = "OP_NVERIFY",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_OPEN] = {
- .op_func = (nfsd4op_func)nfsd4_open,
+ .op_func = nfsd4_open,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
- .op_set_currentstateid = (stateid_setter)nfsd4_set_openstateid,
+ .op_rsize_bop = nfsd4_open_rsize,
+ .op_set_currentstateid = nfsd4_set_openstateid,
},
[OP_OPEN_CONFIRM] = {
- .op_func = (nfsd4op_func)nfsd4_open_confirm,
+ .op_func = nfsd4_open_confirm,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_CONFIRM",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+ .op_rsize_bop = nfsd4_status_stateid_rsize,
},
[OP_OPEN_DOWNGRADE] = {
- .op_func = (nfsd4op_func)nfsd4_open_downgrade,
+ .op_func = nfsd4_open_downgrade,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_opendowngradestateid,
- .op_set_currentstateid = (stateid_setter)nfsd4_set_opendowngradestateid,
+ .op_rsize_bop = nfsd4_status_stateid_rsize,
+ .op_get_currentstateid = nfsd4_get_opendowngradestateid,
+ .op_set_currentstateid = nfsd4_set_opendowngradestateid,
},
[OP_PUTFH] = {
- .op_func = (nfsd4op_func)nfsd4_putfh,
+ .op_func = nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTFH",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
- .op_func = (nfsd4op_func)nfsd4_putrootfh,
+ .op_func = nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTPUBFH",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
- .op_func = (nfsd4op_func)nfsd4_putrootfh,
+ .op_func = nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTROOTFH",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_READ] = {
- .op_func = (nfsd4op_func)nfsd4_read,
+ .op_func = nfsd4_read,
.op_name = "OP_READ",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_readstateid,
+ .op_rsize_bop = nfsd4_read_rsize,
+ .op_get_currentstateid = nfsd4_get_readstateid,
},
[OP_READDIR] = {
- .op_func = (nfsd4op_func)nfsd4_readdir,
+ .op_func = nfsd4_readdir,
.op_name = "OP_READDIR",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_readdir_rsize,
+ .op_rsize_bop = nfsd4_readdir_rsize,
},
[OP_READLINK] = {
- .op_func = (nfsd4op_func)nfsd4_readlink,
+ .op_func = nfsd4_readlink,
.op_name = "OP_READLINK",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_readlink_rsize,
+ .op_rsize_bop = nfsd4_readlink_rsize,
},
[OP_REMOVE] = {
- .op_func = (nfsd4op_func)nfsd4_remove,
+ .op_func = nfsd4_remove,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_remove_rsize,
+ .op_rsize_bop = nfsd4_remove_rsize,
},
[OP_RENAME] = {
- .op_func = (nfsd4op_func)nfsd4_rename,
+ .op_func = nfsd4_rename,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_RENAME",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_rename_rsize,
+ .op_rsize_bop = nfsd4_rename_rsize,
},
[OP_RENEW] = {
- .op_func = (nfsd4op_func)nfsd4_renew,
+ .op_func = nfsd4_renew,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RENEW",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_RESTOREFH] = {
- .op_func = (nfsd4op_func)nfsd4_restorefh,
+ .op_func = nfsd4_restorefh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_RESTOREFH",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SAVEFH] = {
- .op_func = (nfsd4op_func)nfsd4_savefh,
+ .op_func = nfsd4_savefh,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_SAVEFH",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SECINFO] = {
- .op_func = (nfsd4op_func)nfsd4_secinfo,
+ .op_func = nfsd4_secinfo,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize,
+ .op_rsize_bop = nfsd4_secinfo_rsize,
},
[OP_SETATTR] = {
- .op_func = (nfsd4op_func)nfsd4_setattr,
+ .op_func = nfsd4_setattr,
.op_name = "OP_SETATTR",
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_setattrstateid,
+ .op_rsize_bop = nfsd4_setattr_rsize,
+ .op_get_currentstateid = nfsd4_get_setattrstateid,
},
[OP_SETCLIENTID] = {
- .op_func = (nfsd4op_func)nfsd4_setclientid,
+ .op_func = nfsd4_setclientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_setclientid_rsize,
+ .op_rsize_bop = nfsd4_setclientid_rsize,
},
[OP_SETCLIENTID_CONFIRM] = {
- .op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
+ .op_func = nfsd4_setclientid_confirm,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID_CONFIRM",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_VERIFY] = {
- .op_func = (nfsd4op_func)nfsd4_verify,
+ .op_func = nfsd4_verify,
.op_name = "OP_VERIFY",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_WRITE] = {
- .op_func = (nfsd4op_func)nfsd4_write,
+ .op_func = nfsd4_write,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
- .op_get_currentstateid = (stateid_getter)nfsd4_get_writestateid,
+ .op_rsize_bop = nfsd4_write_rsize,
+ .op_get_currentstateid = nfsd4_get_writestateid,
},
[OP_RELEASE_LOCKOWNER] = {
- .op_func = (nfsd4op_func)nfsd4_release_lockowner,
+ .op_func = nfsd4_release_lockowner,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RELEASE_LOCKOWNER",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
- .op_func = (nfsd4op_func)nfsd4_exchange_id,
+ .op_func = nfsd4_exchange_id,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
+ .op_rsize_bop = nfsd4_exchange_id_rsize,
},
[OP_BACKCHANNEL_CTL] = {
- .op_func = (nfsd4op_func)nfsd4_backchannel_ctl,
+ .op_func = nfsd4_backchannel_ctl,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_BACKCHANNEL_CTL",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_BIND_CONN_TO_SESSION] = {
- .op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
+ .op_func = nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_BIND_CONN_TO_SESSION",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_bind_conn_to_session_rsize,
+ .op_rsize_bop = nfsd4_bind_conn_to_session_rsize,
},
[OP_CREATE_SESSION] = {
- .op_func = (nfsd4op_func)nfsd4_create_session,
+ .op_func = nfsd4_create_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_CREATE_SESSION",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_session_rsize,
+ .op_rsize_bop = nfsd4_create_session_rsize,
},
[OP_DESTROY_SESSION] = {
- .op_func = (nfsd4op_func)nfsd4_destroy_session,
+ .op_func = nfsd4_destroy_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_SESSION",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SEQUENCE] = {
- .op_func = (nfsd4op_func)nfsd4_sequence,
+ .op_func = nfsd4_sequence,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_SEQUENCE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
+ .op_rsize_bop = nfsd4_sequence_rsize,
},
[OP_DESTROY_CLIENTID] = {
- .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
+ .op_func = nfsd4_destroy_clientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_CLIENTID",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_RECLAIM_COMPLETE] = {
- .op_func = (nfsd4op_func)nfsd4_reclaim_complete,
+ .op_func = nfsd4_reclaim_complete,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_RECLAIM_COMPLETE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SECINFO_NO_NAME] = {
- .op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
+ .op_func = nfsd4_secinfo_no_name,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize,
+ .op_rsize_bop = nfsd4_secinfo_rsize,
},
[OP_TEST_STATEID] = {
- .op_func = (nfsd4op_func)nfsd4_test_stateid,
+ .op_func = nfsd4_test_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_TEST_STATEID",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_test_stateid_rsize,
+ .op_rsize_bop = nfsd4_test_stateid_rsize,
},
[OP_FREE_STATEID] = {
- .op_func = (nfsd4op_func)nfsd4_free_stateid,
+ .op_func = nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
- .op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_get_currentstateid = nfsd4_get_freestateid,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
- .op_func = (nfsd4op_func)nfsd4_getdeviceinfo,
+ .op_func = nfsd4_getdeviceinfo,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_GETDEVICEINFO",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_getdeviceinfo_rsize,
+ .op_rsize_bop = nfsd4_getdeviceinfo_rsize,
},
[OP_LAYOUTGET] = {
- .op_func = (nfsd4op_func)nfsd4_layoutget,
+ .op_func = nfsd4_layoutget,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTGET",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize,
+ .op_rsize_bop = nfsd4_layoutget_rsize,
},
[OP_LAYOUTCOMMIT] = {
- .op_func = (nfsd4op_func)nfsd4_layoutcommit,
+ .op_func = nfsd4_layoutcommit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTCOMMIT",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize,
+ .op_rsize_bop = nfsd4_layoutcommit_rsize,
},
[OP_LAYOUTRETURN] = {
- .op_func = (nfsd4op_func)nfsd4_layoutreturn,
+ .op_func = nfsd4_layoutreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTRETURN",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize,
+ .op_rsize_bop = nfsd4_layoutreturn_rsize,
},
#endif /* CONFIG_NFSD_PNFS */
/* NFSv4.2 operations */
[OP_ALLOCATE] = {
- .op_func = (nfsd4op_func)nfsd4_allocate,
+ .op_func = nfsd4_allocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_ALLOCATE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_DEALLOCATE] = {
- .op_func = (nfsd4op_func)nfsd4_deallocate,
+ .op_func = nfsd4_deallocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_DEALLOCATE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_CLONE] = {
- .op_func = (nfsd4op_func)nfsd4_clone,
+ .op_func = nfsd4_clone,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_CLONE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_COPY] = {
- .op_func = (nfsd4op_func)nfsd4_copy,
+ .op_func = nfsd4_copy,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_COPY",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_copy_rsize,
+ .op_rsize_bop = nfsd4_copy_rsize,
},
[OP_SEEK] = {
- .op_func = (nfsd4op_func)nfsd4_seek,
+ .op_func = nfsd4_seek,
.op_name = "OP_SEEK",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_seek_rsize,
+ .op_rsize_bop = nfsd4_seek_rsize,
},
};
@@ -2515,19 +2531,19 @@ static const char *nfsd4_op_name(unsigned opnum)
#define nfsd4_voidres nfsd4_voidargs
struct nfsd4_voidargs { int dummy; };
-static struct svc_procedure nfsd_procedures4[2] = {
+static const struct svc_procedure nfsd_procedures4[2] = {
[NFSPROC4_NULL] = {
- .pc_func = (svc_procfunc) nfsd4_proc_null,
- .pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
+ .pc_func = nfsd4_proc_null,
+ .pc_encode = nfs4svc_encode_voidres,
.pc_argsize = sizeof(struct nfsd4_voidargs),
.pc_ressize = sizeof(struct nfsd4_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
},
[NFSPROC4_COMPOUND] = {
- .pc_func = (svc_procfunc) nfsd4_proc_compound,
- .pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
- .pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
+ .pc_func = nfsd4_proc_compound,
+ .pc_decode = nfs4svc_decode_compoundargs,
+ .pc_encode = nfs4svc_encode_compoundres,
.pc_argsize = sizeof(struct nfsd4_compoundargs),
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
@@ -2536,10 +2552,12 @@ static struct svc_procedure nfsd_procedures4[2] = {
},
};
-struct svc_version nfsd_version4 = {
+static unsigned int nfsd_count3[ARRAY_SIZE(nfsd_procedures4)];
+const struct svc_version nfsd_version4 = {
.vs_vers = 4,
.vs_nproc = 2,
.vs_proc = nfsd_procedures4,
+ .vs_count = nfsd_count3,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS4_SVC_XDRSIZE,
.vs_rpcb_optnl = true,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 22002fb75a18..0c04f81aa63b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2402,10 +2402,10 @@ static bool client_has_state(struct nfs4_client *clp)
}
__be32
-nfsd4_exchange_id(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_exchange_id *exid)
+nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_exchange_id *exid = &u->exchange_id;
struct nfs4_client *conf, *new;
struct nfs4_client *unconf = NULL;
__be32 status;
@@ -2698,9 +2698,9 @@ static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_create_session *cr_ses)
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_create_session *cr_ses = &u->create_session;
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
@@ -2824,8 +2824,11 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
return nfserr_inval;
}
-__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
+__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
struct nfsd4_session *session = cstate->session;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
__be32 status;
@@ -2845,8 +2848,9 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
- struct nfsd4_bind_conn_to_session *bcts)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
__be32 status;
struct nfsd4_conn *conn;
struct nfsd4_session *session;
@@ -2886,10 +2890,10 @@ static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4
}
__be32
-nfsd4_destroy_session(struct svc_rqst *r,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_destroy_session *sessionid)
+nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_destroy_session *sessionid = &u->destroy_session;
struct nfsd4_session *ses;
__be32 status;
int ref_held_by_me = 0;
@@ -2983,10 +2987,10 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
}
__be32
-nfsd4_sequence(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_sequence *seq)
+nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_sequence *seq = &u->sequence;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct xdr_stream *xdr = &resp->xdr;
struct nfsd4_session *session;
@@ -3120,8 +3124,11 @@ nfsd4_sequence_done(struct nfsd4_compoundres *resp)
}
__be32
-nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
+nfsd4_destroy_clientid(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
struct nfs4_client *conf, *unconf;
struct nfs4_client *clp = NULL;
__be32 status = 0;
@@ -3161,8 +3168,10 @@ out:
}
__be32
-nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
+nfsd4_reclaim_complete(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
__be32 status = 0;
if (rc->rca_one_fs) {
@@ -3199,8 +3208,9 @@ out:
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_setclientid *setclid)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setclientid *setclid = &u->setclientid;
struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
struct nfs4_client *conf, *new;
@@ -3257,9 +3267,11 @@ out:
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_setclientid_confirm *setclientid_confirm)
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setclientid_confirm *setclientid_confirm =
+ &u->setclientid_confirm;
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
@@ -4506,8 +4518,9 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- clientid_t *clid)
+ union nfsd4_op_u *u)
{
+ clientid_t *clid = &u->renew;
struct nfs4_client *clp;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -4993,8 +5006,9 @@ out:
*/
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_test_stateid *test_stateid)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid;
struct nfs4_client *cl = cstate->session->se_client;
@@ -5033,8 +5047,9 @@ out:
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_free_stateid *free_stateid)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
@@ -5162,8 +5177,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_open_confirm *oc)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_open_confirm *oc = &u->open_confirm;
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
@@ -5230,9 +5246,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *cstate,
- struct nfsd4_open_downgrade *od)
+ struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct nfsd4_open_downgrade *od = &u->open_downgrade;
__be32 status;
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -5300,8 +5316,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
*/
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_close *close)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_close *close = &u->close;
__be32 status;
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
@@ -5330,8 +5347,9 @@ out:
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_delegreturn *dr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_delegreturn *dr = &u->delegreturn;
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
struct nfs4_stid *s;
@@ -5706,8 +5724,9 @@ out:
*/
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_lock *lock)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_lock *lock = &u->lock;
struct nfs4_openowner *open_sop = NULL;
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp = NULL;
@@ -5939,8 +5958,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
*/
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_lockt *lockt)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_lockt *lockt = &u->lockt;
struct file_lock *file_lock = NULL;
struct nfs4_lockowner *lo = NULL;
__be32 status;
@@ -6012,8 +6032,9 @@ out:
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- struct nfsd4_locku *locku)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_locku *locku = &u->locku;
struct nfs4_ol_stateid *stp;
struct file *filp = NULL;
struct file_lock *file_lock = NULL;
@@ -6119,8 +6140,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
- struct nfsd4_release_lockowner *rlockowner)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
clientid_t *clid = &rlockowner->rl_clientid;
struct nfs4_stateowner *sop;
struct nfs4_lockowner *lo = NULL;
@@ -7103,27 +7125,31 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
* functions to set current state id
*/
void
-nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
+nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- put_stateid(cstate, &odp->od_stateid);
+ put_stateid(cstate, &u->open_downgrade.od_stateid);
}
void
-nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- put_stateid(cstate, &open->op_stateid);
+ put_stateid(cstate, &u->open.op_stateid);
}
void
-nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
+nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- put_stateid(cstate, &close->cl_stateid);
+ put_stateid(cstate, &u->close.cl_stateid);
}
void
-nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
+nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- put_stateid(cstate, &lock->lk_resp_stateid);
+ put_stateid(cstate, &u->lock.lk_resp_stateid);
}
/*
@@ -7131,49 +7157,57 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
*/
void
-nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
+nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &odp->od_stateid);
+ get_stateid(cstate, &u->open_downgrade.od_stateid);
}
void
-nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
+nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &drp->dr_stateid);
+ get_stateid(cstate, &u->delegreturn.dr_stateid);
}
void
-nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
+nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &fsp->fr_stateid);
+ get_stateid(cstate, &u->free_stateid.fr_stateid);
}
void
-nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
+nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &setattr->sa_stateid);
+ get_stateid(cstate, &u->setattr.sa_stateid);
}
void
-nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
+nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &close->cl_stateid);
+ get_stateid(cstate, &u->close.cl_stateid);
}
void
-nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
+nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &locku->lu_stateid);
+ get_stateid(cstate, &u->locku.lu_stateid);
}
void
-nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
+nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &read->rd_stateid);
+ get_stateid(cstate, &u->read.rd_stateid);
}
void
-nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
+nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
{
- get_stateid(cstate, &write->wr_stateid);
+ get_stateid(cstate, &u->write.wr_stateid);
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 26780d53a6f9..20fbcab97753 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1973,7 +1973,7 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
*p++ = 0;
} else if (IS_I_VERSION(inode)) {
- p = xdr_encode_hyper(p, inode->i_version);
+ p = xdr_encode_hyper(p, nfsd4_change_attribute(inode));
} else {
*p++ = cpu_to_be32(stat->ctime.tv_sec);
*p++ = cpu_to_be32(stat->ctime.tv_nsec);
@@ -4538,14 +4538,13 @@ nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
}
int
-nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
-int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
+void nfsd4_release_compoundargs(struct svc_rqst *rqstp)
{
- struct svc_rqst *rqstp = rq;
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
@@ -4559,12 +4558,13 @@ int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
args->to_free = tb->next;
kfree(tb);
}
- return 1;
}
int
-nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args)
+nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd4_compoundargs *args = rqstp->rq_argp;
+
if (rqstp->rq_arg.head[0].iov_len % 4) {
/* client is nuts */
dprintk("%s: compound not properly padded! (peeraddr=%pISc xid=0x%x)",
@@ -4584,11 +4584,12 @@ nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_comp
}
int
-nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundres *resp)
+nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p)
{
/*
* All that remains is to write the tag and operation count...
*/
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct xdr_buf *buf = resp->xdr.buf;
WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len +
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index d96606801d47..b9c538ab7a59 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -60,7 +60,7 @@ struct readdir_cd {
extern struct svc_program nfsd_program;
-extern struct svc_version nfsd_version2, nfsd_version3,
+extern const struct svc_version nfsd_version2, nfsd_version3,
nfsd_version4;
extern struct mutex nfsd_mutex;
extern spinlock_t nfsd_drc_lock;
@@ -86,12 +86,12 @@ void nfsd_destroy(struct net *net);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
#ifdef CONFIG_NFSD_V2_ACL
-extern struct svc_version nfsd_acl_version2;
+extern const struct svc_version nfsd_acl_version2;
#else
#define nfsd_acl_version2 NULL
#endif
#ifdef CONFIG_NFSD_V3_ACL
-extern struct svc_version nfsd_acl_version3;
+extern const struct svc_version nfsd_acl_version3;
#else
#define nfsd_acl_version3 NULL
#endif
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index f84fe6bf9aee..e47cf6c2ac28 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -241,6 +241,28 @@ fh_clear_wcc(struct svc_fh *fhp)
}
/*
+ * We could use i_version alone as the change attribute. However,
+ * i_version can go backwards after a reboot. On its own that doesn't
+ * necessarily cause a problem, but if i_version goes backwards and then
+ * is incremented again it could reuse a value that was previously used
+ * before boot, and a client who queried the two values might
+ * incorrectly assume nothing changed.
+ *
+ * By using both ctime and the i_version counter we guarantee that as
+ * long as time doesn't go backwards we never reuse an old value.
+ */
+static inline u64 nfsd4_change_attribute(struct inode *inode)
+{
+ u64 chattr;
+
+ chattr = inode->i_ctime.tv_sec;
+ chattr <<= 30;
+ chattr += inode->i_ctime.tv_nsec;
+ chattr += inode->i_version;
+ return chattr;
+}
+
+/*
* Fill in the pre_op attr for the wcc data
*/
static inline void
@@ -253,7 +275,7 @@ fill_pre_wcc(struct svc_fh *fhp)
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
fhp->fh_pre_size = inode->i_size;
- fhp->fh_pre_change = inode->i_version;
+ fhp->fh_pre_change = nfsd4_change_attribute(inode);
fhp->fh_pre_saved = true;
}
}
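The comment introduced in nfsfh.h above explains the reasoning; a standalone sketch of the same arithmetic (using a stand-in struct, not the kernel's struct inode) makes the guarantee concrete: even when i_version restarts at a small value after a reboot, a later ctime keeps the change attribute above every previously issued value.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the relevant inode fields; illustrative only. */
struct demo_inode {
	int64_t  ctime_sec;
	long     ctime_nsec;	/* 0..999999999 fits in 30 bits */
	uint64_t i_version;
};

static uint64_t demo_change_attribute(const struct demo_inode *inode)
{
	uint64_t chattr = inode->ctime_sec;

	chattr <<= 30;			/* make room for the nanoseconds */
	chattr += inode->ctime_nsec;
	chattr += inode->i_version;
	return chattr;
}

int main(void)
{
	/* i_version restarted after a "reboot", but ctime moved on... */
	struct demo_inode before = { .ctime_sec = 1000, .i_version = 500 };
	struct demo_inode after  = { .ctime_sec = 1001, .i_version = 1 };

	/* ...so the new value still exceeds every pre-reboot value */
	printf("%llu < %llu\n",
	       (unsigned long long)demo_change_attribute(&before),
	       (unsigned long long)demo_change_attribute(&after));
	return 0;
}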
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 03a7e9da4da0..5076ae2b8258 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -17,7 +17,7 @@ typedef struct svc_buf svc_buf;
static __be32
-nfsd_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+nfsd_proc_null(struct svc_rqst *rqstp)
{
return nfs_ok;
}
@@ -39,9 +39,10 @@ nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp)
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
-nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
- struct nfsd_attrstat *resp)
+nfsd_proc_getattr(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
@@ -56,9 +57,10 @@ nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
-nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
- struct nfsd_attrstat *resp)
+nfsd_proc_setattr(struct svc_rqst *rqstp)
{
+ struct nfsd_sattrargs *argp = rqstp->rq_argp;
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
struct iattr *iap = &argp->attrs;
struct svc_fh *fhp;
__be32 nfserr;
@@ -122,9 +124,10 @@ done:
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
-nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
- struct nfsd_diropres *resp)
+nfsd_proc_lookup(struct svc_rqst *rqstp)
{
+ struct nfsd_diropargs *argp = rqstp->rq_argp;
+ struct nfsd_diropres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: LOOKUP %s %.*s\n",
@@ -142,9 +145,10 @@ nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
* Read a symlink.
*/
static __be32
-nfsd_proc_readlink(struct svc_rqst *rqstp, struct nfsd_readlinkargs *argp,
- struct nfsd_readlinkres *resp)
+nfsd_proc_readlink(struct svc_rqst *rqstp)
{
+ struct nfsd_readlinkargs *argp = rqstp->rq_argp;
+ struct nfsd_readlinkres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: READLINK %s\n", SVCFH_fmt(&argp->fh));
@@ -162,9 +166,10 @@ nfsd_proc_readlink(struct svc_rqst *rqstp, struct nfsd_readlinkargs *argp,
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
-nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
- struct nfsd_readres *resp)
+nfsd_proc_read(struct svc_rqst *rqstp)
{
+ struct nfsd_readargs *argp = rqstp->rq_argp;
+ struct nfsd_readres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: READ %s %d bytes at %d\n",
@@ -200,9 +205,10 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
-nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
- struct nfsd_attrstat *resp)
+nfsd_proc_write(struct svc_rqst *rqstp)
{
+ struct nfsd_writeargs *argp = rqstp->rq_argp;
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
__be32 nfserr;
unsigned long cnt = argp->len;
@@ -222,9 +228,10 @@ nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
* N.B. After this call _both_ argp->fh and resp->fh need an fh_put
*/
static __be32
-nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
- struct nfsd_diropres *resp)
+nfsd_proc_create(struct svc_rqst *rqstp)
{
+ struct nfsd_createargs *argp = rqstp->rq_argp;
+ struct nfsd_diropres *resp = rqstp->rq_resp;
svc_fh *dirfhp = &argp->fh;
svc_fh *newfhp = &resp->fh;
struct iattr *attr = &argp->attrs;
@@ -377,9 +384,9 @@ done:
}
static __be32
-nfsd_proc_remove(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
- void *resp)
+nfsd_proc_remove(struct svc_rqst *rqstp)
{
+ struct nfsd_diropargs *argp = rqstp->rq_argp;
__be32 nfserr;
dprintk("nfsd: REMOVE %s %.*s\n", SVCFH_fmt(&argp->fh),
@@ -392,9 +399,9 @@ nfsd_proc_remove(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
}
static __be32
-nfsd_proc_rename(struct svc_rqst *rqstp, struct nfsd_renameargs *argp,
- void *resp)
+nfsd_proc_rename(struct svc_rqst *rqstp)
{
+ struct nfsd_renameargs *argp = rqstp->rq_argp;
__be32 nfserr;
dprintk("nfsd: RENAME %s %.*s -> \n",
@@ -410,9 +417,9 @@ nfsd_proc_rename(struct svc_rqst *rqstp, struct nfsd_renameargs *argp,
}
static __be32
-nfsd_proc_link(struct svc_rqst *rqstp, struct nfsd_linkargs *argp,
- void *resp)
+nfsd_proc_link(struct svc_rqst *rqstp)
{
+ struct nfsd_linkargs *argp = rqstp->rq_argp;
__be32 nfserr;
dprintk("nfsd: LINK %s ->\n",
@@ -430,9 +437,9 @@ nfsd_proc_link(struct svc_rqst *rqstp, struct nfsd_linkargs *argp,
}
static __be32
-nfsd_proc_symlink(struct svc_rqst *rqstp, struct nfsd_symlinkargs *argp,
- void *resp)
+nfsd_proc_symlink(struct svc_rqst *rqstp)
{
+ struct nfsd_symlinkargs *argp = rqstp->rq_argp;
struct svc_fh newfh;
__be32 nfserr;
@@ -460,9 +467,10 @@ nfsd_proc_symlink(struct svc_rqst *rqstp, struct nfsd_symlinkargs *argp,
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
-nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
- struct nfsd_diropres *resp)
+nfsd_proc_mkdir(struct svc_rqst *rqstp)
{
+ struct nfsd_createargs *argp = rqstp->rq_argp;
+ struct nfsd_diropres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: MKDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
@@ -484,9 +492,9 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
* Remove a directory
*/
static __be32
-nfsd_proc_rmdir(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
- void *resp)
+nfsd_proc_rmdir(struct svc_rqst *rqstp)
{
+ struct nfsd_diropargs *argp = rqstp->rq_argp;
__be32 nfserr;
dprintk("nfsd: RMDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
@@ -500,9 +508,10 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
* Read a portion of a directory.
*/
static __be32
-nfsd_proc_readdir(struct svc_rqst *rqstp, struct nfsd_readdirargs *argp,
- struct nfsd_readdirres *resp)
+nfsd_proc_readdir(struct svc_rqst *rqstp)
{
+ struct nfsd_readdirargs *argp = rqstp->rq_argp;
+ struct nfsd_readdirres *resp = rqstp->rq_resp;
int count;
__be32 nfserr;
loff_t offset;
@@ -540,9 +549,10 @@ nfsd_proc_readdir(struct svc_rqst *rqstp, struct nfsd_readdirargs *argp,
* Get file system info
*/
static __be32
-nfsd_proc_statfs(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
- struct nfsd_statfsres *resp)
+nfsd_proc_statfs(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
+ struct nfsd_statfsres *resp = rqstp->rq_resp;
__be32 nfserr;
dprintk("nfsd: STATFS %s\n", SVCFH_fmt(&argp->fh));
@@ -563,168 +573,168 @@ struct nfsd_void { int dummy; };
#define FH 8 /* filehandle */
#define AT 18 /* attributes */
-static struct svc_procedure nfsd_procedures2[18] = {
+static const struct svc_procedure nfsd_procedures2[18] = {
[NFSPROC_NULL] = {
- .pc_func = (svc_procfunc) nfsd_proc_null,
- .pc_decode = (kxdrproc_t) nfssvc_decode_void,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_func = nfsd_proc_null,
+ .pc_decode = nfssvc_decode_void,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_void),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
},
[NFSPROC_GETATTR] = {
- .pc_func = (svc_procfunc) nfsd_proc_getattr,
- .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
- .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_getattr,
+ .pc_decode = nfssvc_decode_fhandle,
+ .pc_encode = nfssvc_encode_attrstat,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
},
[NFSPROC_SETATTR] = {
- .pc_func = (svc_procfunc) nfsd_proc_setattr,
- .pc_decode = (kxdrproc_t) nfssvc_decode_sattrargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_setattr,
+ .pc_decode = nfssvc_decode_sattrargs,
+ .pc_encode = nfssvc_encode_attrstat,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_sattrargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
},
[NFSPROC_ROOT] = {
- .pc_decode = (kxdrproc_t) nfssvc_decode_void,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_decode = nfssvc_decode_void,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_void),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
},
[NFSPROC_LOOKUP] = {
- .pc_func = (svc_procfunc) nfsd_proc_lookup,
- .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_lookup,
+ .pc_decode = nfssvc_decode_diropargs,
+ .pc_encode = nfssvc_encode_diropres,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+AT,
},
[NFSPROC_READLINK] = {
- .pc_func = (svc_procfunc) nfsd_proc_readlink,
- .pc_decode = (kxdrproc_t) nfssvc_decode_readlinkargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_readlinkres,
+ .pc_func = nfsd_proc_readlink,
+ .pc_decode = nfssvc_decode_readlinkargs,
+ .pc_encode = nfssvc_encode_readlinkres,
.pc_argsize = sizeof(struct nfsd_readlinkargs),
.pc_ressize = sizeof(struct nfsd_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
},
[NFSPROC_READ] = {
- .pc_func = (svc_procfunc) nfsd_proc_read,
- .pc_decode = (kxdrproc_t) nfssvc_decode_readargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_readres,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_read,
+ .pc_decode = nfssvc_decode_readargs,
+ .pc_encode = nfssvc_encode_readres,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_readargs),
.pc_ressize = sizeof(struct nfsd_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
},
[NFSPROC_WRITECACHE] = {
- .pc_decode = (kxdrproc_t) nfssvc_decode_void,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_decode = nfssvc_decode_void,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_void),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
},
[NFSPROC_WRITE] = {
- .pc_func = (svc_procfunc) nfsd_proc_write,
- .pc_decode = (kxdrproc_t) nfssvc_decode_writeargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_write,
+ .pc_decode = nfssvc_decode_writeargs,
+ .pc_encode = nfssvc_encode_attrstat,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_writeargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
},
[NFSPROC_CREATE] = {
- .pc_func = (svc_procfunc) nfsd_proc_create,
- .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_create,
+ .pc_decode = nfssvc_decode_createargs,
+ .pc_encode = nfssvc_encode_diropres,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_createargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
},
[NFSPROC_REMOVE] = {
- .pc_func = (svc_procfunc) nfsd_proc_remove,
- .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_func = nfsd_proc_remove,
+ .pc_decode = nfssvc_decode_diropargs,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
},
[NFSPROC_RENAME] = {
- .pc_func = (svc_procfunc) nfsd_proc_rename,
- .pc_decode = (kxdrproc_t) nfssvc_decode_renameargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_func = nfsd_proc_rename,
+ .pc_decode = nfssvc_decode_renameargs,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_renameargs),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
},
[NFSPROC_LINK] = {
- .pc_func = (svc_procfunc) nfsd_proc_link,
- .pc_decode = (kxdrproc_t) nfssvc_decode_linkargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_func = nfsd_proc_link,
+ .pc_decode = nfssvc_decode_linkargs,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_linkargs),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
},
[NFSPROC_SYMLINK] = {
- .pc_func = (svc_procfunc) nfsd_proc_symlink,
- .pc_decode = (kxdrproc_t) nfssvc_decode_symlinkargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_func = nfsd_proc_symlink,
+ .pc_decode = nfssvc_decode_symlinkargs,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_symlinkargs),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
},
[NFSPROC_MKDIR] = {
- .pc_func = (svc_procfunc) nfsd_proc_mkdir,
- .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
- .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_func = nfsd_proc_mkdir,
+ .pc_decode = nfssvc_decode_createargs,
+ .pc_encode = nfssvc_encode_diropres,
+ .pc_release = nfssvc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_createargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
},
[NFSPROC_RMDIR] = {
- .pc_func = (svc_procfunc) nfsd_proc_rmdir,
- .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_func = nfsd_proc_rmdir,
+ .pc_decode = nfssvc_decode_diropargs,
+ .pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_void),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
},
[NFSPROC_READDIR] = {
- .pc_func = (svc_procfunc) nfsd_proc_readdir,
- .pc_decode = (kxdrproc_t) nfssvc_decode_readdirargs,
- .pc_encode = (kxdrproc_t) nfssvc_encode_readdirres,
+ .pc_func = nfsd_proc_readdir,
+ .pc_decode = nfssvc_decode_readdirargs,
+ .pc_encode = nfssvc_encode_readdirres,
.pc_argsize = sizeof(struct nfsd_readdirargs),
.pc_ressize = sizeof(struct nfsd_readdirres),
.pc_cachetype = RC_NOCACHE,
},
[NFSPROC_STATFS] = {
- .pc_func = (svc_procfunc) nfsd_proc_statfs,
- .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
- .pc_encode = (kxdrproc_t) nfssvc_encode_statfsres,
+ .pc_func = nfsd_proc_statfs,
+ .pc_decode = nfssvc_decode_fhandle,
+ .pc_encode = nfssvc_encode_statfsres,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_statfsres),
.pc_cachetype = RC_NOCACHE,
@@ -733,12 +743,14 @@ static struct svc_procedure nfsd_procedures2[18] = {
};
-struct svc_version nfsd_version2 = {
- .vs_vers = 2,
- .vs_nproc = 18,
- .vs_proc = nfsd_procedures2,
- .vs_dispatch = nfsd_dispatch,
- .vs_xdrsize = NFS2_SVC_XDRSIZE,
+static unsigned int nfsd_count2[ARRAY_SIZE(nfsd_procedures2)];
+const struct svc_version nfsd_version2 = {
+ .vs_vers = 2,
+ .vs_nproc = 18,
+ .vs_proc = nfsd_procedures2,
+ .vs_count = nfsd_count2,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_xdrsize = NFS2_SVC_XDRSIZE,
};
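One detail worth noting in the hunk above: the new nfsd_count2 array is sized with ARRAY_SIZE(nfsd_procedures2), so the per-procedure call counters can never drift out of step with the procedure table. A standalone sketch of that sizing idiom, with illustrative names:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_proc { const char *name; };

static const struct demo_proc demo_procs[] = {
	{ "NULL" }, { "GETATTR" }, { "SETATTR" },
};

/* one counter per procedure, count derived from the table itself */
static unsigned int demo_count[ARRAY_SIZE(demo_procs)];

int main(void)
{
	unsigned int i;

	demo_count[1]++;	/* e.g. one GETATTR call was dispatched */
	for (i = 0; i < ARRAY_SIZE(demo_procs); i++)
		printf("%s: %u\n", demo_procs[i].name, demo_count[i]);
	return 0;
}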
/*
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 59979f0bbd4b..063ae7de2c12 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -68,14 +68,14 @@ unsigned long nfsd_drc_mem_used;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat nfsd_acl_svcstats;
-static struct svc_version * nfsd_acl_version[] = {
+static const struct svc_version *nfsd_acl_version[] = {
[2] = &nfsd_acl_version2,
[3] = &nfsd_acl_version3,
};
#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
-static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];
+static const struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];
static struct svc_program nfsd_acl_program = {
.pg_prog = NFS_ACL_PROGRAM,
@@ -92,7 +92,7 @@ static struct svc_stat nfsd_acl_svcstats = {
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
-static struct svc_version * nfsd_version[] = {
+static const struct svc_version *nfsd_version[] = {
[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
[3] = &nfsd_version3,
@@ -104,7 +104,7 @@ static struct svc_version * nfsd_version[] = {
#define NFSD_MINVERS 2
#define NFSD_NRVERS ARRAY_SIZE(nfsd_version)
-static struct svc_version *nfsd_versions[NFSD_NRVERS];
+static const struct svc_version *nfsd_versions[NFSD_NRVERS];
struct svc_program nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
@@ -756,7 +756,7 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
* problem, we enforce these assumptions here:
*/
static bool nfs_request_too_big(struct svc_rqst *rqstp,
- struct svc_procedure *proc)
+ const struct svc_procedure *proc)
{
/*
* The ACL code has more careful bounds-checking and is not
@@ -781,8 +781,7 @@ static bool nfs_request_too_big(struct svc_rqst *rqstp,
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
- struct svc_procedure *proc;
- kxdrproc_t xdr;
+ const struct svc_procedure *proc;
__be32 nfserr;
__be32 *nfserrp;
@@ -801,9 +800,8 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*/
rqstp->rq_cachetype = proc->pc_cachetype;
/* Decode arguments */
- xdr = proc->pc_decode;
- if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
- rqstp->rq_argp)) {
+ if (proc->pc_decode &&
+ !proc->pc_decode(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base)) {
dprintk("nfsd: failed to decode arguments!\n");
*statp = rpc_garbage_args;
return 1;
@@ -827,7 +825,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
rqstp->rq_res.head[0].iov_len += sizeof(__be32);
/* Now call the procedure handler, and encode NFS status. */
- nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
+ nfserr = proc->pc_func(rqstp);
nfserr = map_new_errors(rqstp->rq_vers, nfserr);
if (nfserr == nfserr_dropit || test_bit(RQ_DROPME, &rqstp->rq_flags)) {
dprintk("nfsd: Dropping request; may be revisited later\n");
@@ -842,9 +840,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
* For NFSv2, additional info is never returned in case of an error.
*/
if (!(nfserr && rqstp->rq_vers == 2)) {
- xdr = proc->pc_encode;
- if (xdr && !xdr(rqstp, nfserrp,
- rqstp->rq_resp)) {
+ if (proc->pc_encode && !proc->pc_encode(rqstp, nfserrp)) {
/* Failed to encode result. Release cache entry */
dprintk("nfsd: failed to encode result!\n");
nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
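With the dispatcher changes above, nfsd_dispatch calls pc_decode, pc_func and pc_encode through exact-typed pointers, and each procedure pulls its decoded arguments and result buffer off the request itself. A minimal sketch of that handler-side convention; demo_rqst and the other names are stand-ins, not kernel types:

#include <stdio.h>

struct demo_args { int handle; };
struct demo_res  { int status; };

/* Stand-in for struct svc_rqst; only the two fields used here. */
struct demo_rqst {
	void *rq_argp;	/* filled by the decode step */
	void *rq_resp;	/* consumed by the encode step */
};

static int demo_proc_getattr(struct demo_rqst *rqstp)
{
	/* every converted handler opens with this pair of fetches */
	struct demo_args *argp = rqstp->rq_argp;
	struct demo_res *resp = rqstp->rq_resp;

	resp->status = argp->handle ? 0 : -1;
	return resp->status;
}

int main(void)
{
	struct demo_args args = { .handle = 42 };
	struct demo_res res;
	struct demo_rqst rqstp = { .rq_argp = &args, .rq_resp = &res };

	printf("status=%d\n", demo_proc_getattr(&rqstp));
	return 0;
}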
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index de07ff625777..e4da2717982d 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -206,14 +206,16 @@ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *f
* XDR decode functions
*/
int
-nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_argsize_check(rqstp, p);
}
int
-nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args)
+nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_fhandle *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -221,9 +223,10 @@ nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *ar
}
int
-nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_sattrargs *args)
+nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_sattrargs *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -233,9 +236,10 @@ nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_diropargs *args)
+nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_diropargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
@@ -244,9 +248,9 @@ nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_readargs *args)
+nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_readargs *args = rqstp->rq_argp;
unsigned int len;
int v;
p = decode_fh(p, &args->fh);
@@ -276,9 +280,9 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_writeargs *args)
+nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_writeargs *args = rqstp->rq_argp;
unsigned int len, hdr, dlen;
struct kvec *head = rqstp->rq_arg.head;
int v;
@@ -332,9 +336,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_createargs *args)
+nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_createargs *args = rqstp->rq_argp;
+
if ( !(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
@@ -344,9 +349,10 @@ nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_renameargs *args)
+nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_renameargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
|| !(p = decode_fh(p, &args->tfh))
@@ -357,8 +363,10 @@ nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkargs *args)
+nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_readlinkargs *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -368,9 +376,10 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
}
int
-nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_linkargs *args)
+nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_linkargs *args = rqstp->rq_argp;
+
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_fh(p, &args->tfh))
|| !(p = decode_filename(p, &args->tname, &args->tlen)))
@@ -380,9 +389,10 @@ nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_symlinkargs *args)
+nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_symlinkargs *args = rqstp->rq_argp;
+
if ( !(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
|| !(p = decode_pathname(p, &args->tname, &args->tlen)))
@@ -393,9 +403,10 @@ nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_readdirargs *args)
+nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_readdirargs *args = rqstp->rq_argp;
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -411,32 +422,35 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
* XDR encode functions
*/
int
-nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
return xdr_ressize_check(rqstp, p);
}
int
-nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_attrstat *resp)
+nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_attrstat *resp = rqstp->rq_resp;
+
p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
int
-nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_diropres *resp)
+nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_diropres *resp = rqstp->rq_resp;
+
p = encode_fh(p, &resp->fh);
p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
int
-nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_readlinkres *resp)
+nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_readlinkres *resp = rqstp->rq_resp;
+
*p++ = htonl(resp->len);
xdr_ressize_check(rqstp, p);
rqstp->rq_res.page_len = resp->len;
@@ -450,9 +464,10 @@ nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_readres *resp)
+nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_readres *resp = rqstp->rq_resp;
+
p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->count);
xdr_ressize_check(rqstp, p);
@@ -469,9 +484,10 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_readdirres *resp)
+nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_readdirres *resp = rqstp->rq_resp;
+
xdr_ressize_check(rqstp, p);
p = resp->buffer;
*p++ = 0; /* no more entries */
@@ -482,9 +498,9 @@ nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
}
int
-nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_statfsres *resp)
+nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p)
{
+ struct nfsd_statfsres *resp = rqstp->rq_resp;
struct kstatfs *stat = &resp->stats;
*p++ = htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */
@@ -543,10 +559,10 @@ nfssvc_encode_entry(void *ccdv, const char *name,
/*
* XDR release functions
*/
-int
-nfssvc_release_fhandle(struct svc_rqst *rqstp, __be32 *p,
- struct nfsd_fhandle *resp)
+void
+nfssvc_release_fhandle(struct svc_rqst *rqstp)
{
+ struct nfsd_fhandle *resp = rqstp->rq_resp;
+
fh_put(&resp->fh);
- return 1;
}
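
[Editor's note] The hunks above convert every NFSv2 XDR helper to one uniform signature that recovers its per-operation argument or result struct from the request itself (rq_argp/rq_resp). Below is a self-contained toy model of why this helps the dispatcher; toy_rqst, toy_decode_sattrargs and the other names are invented for illustration and are not kernel code.

/* toy_xdr.c -- invented example, not kernel code */
#include <stdio.h>

struct toy_rqst {
	void *rq_argp;   /* per-op argument struct, allocated by the dispatcher */
	void *rq_resp;   /* per-op result struct */
};

struct toy_sattrargs { int fh; int attr; };

/* New-style decoder: the op-specific type is recovered inside. */
static int toy_decode_sattrargs(struct toy_rqst *rqstp, const int *p)
{
	struct toy_sattrargs *args = rqstp->rq_argp;

	args->fh = p[0];
	args->attr = p[1];
	return 1;   /* 1 == success, mirroring the svc XDR convention */
}

/* One function-pointer type now fits every operation -- no per-op casts. */
typedef int (*toy_decoder_t)(struct toy_rqst *, const int *);

int main(void)
{
	struct toy_sattrargs args;
	struct toy_rqst rqstp = { .rq_argp = &args };
	const int wire[] = { 42, 0644 };
	toy_decoder_t decode = toy_decode_sattrargs;

	if (decode(&rqstp, wire))
		printf("fh=%d attr=%o\n", args.fh, args.attr);
	return 0;
}
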
diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h
index 4f0481d63804..457ce45e5084 100644
--- a/fs/nfsd/xdr.h
+++ b/fs/nfsd/xdr.h
@@ -131,40 +131,30 @@ union nfsd_xdrstore {
#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore)
-int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *);
-int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
-int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *,
- struct nfsd_sattrargs *);
-int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *,
- struct nfsd_diropargs *);
-int nfssvc_decode_readargs(struct svc_rqst *, __be32 *,
- struct nfsd_readargs *);
-int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *,
- struct nfsd_writeargs *);
-int nfssvc_decode_createargs(struct svc_rqst *, __be32 *,
- struct nfsd_createargs *);
-int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *,
- struct nfsd_renameargs *);
-int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd_readlinkargs *);
-int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *,
- struct nfsd_linkargs *);
-int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd_symlinkargs *);
-int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *,
- struct nfsd_readdirargs *);
-int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *);
-int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *);
-int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *);
-int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *);
-int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *);
-int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *);
-int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *);
+int nfssvc_decode_void(struct svc_rqst *, __be32 *);
+int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *);
+int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_readargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_createargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *);
+int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *);
+int nfssvc_encode_void(struct svc_rqst *, __be32 *);
+int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *);
+int nfssvc_encode_diropres(struct svc_rqst *, __be32 *);
+int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *);
+int nfssvc_encode_readres(struct svc_rqst *, __be32 *);
+int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *);
+int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *);
int nfssvc_encode_entry(void *, const char *name,
int namlen, loff_t offset, u64 ino, unsigned int);
-int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
+void nfssvc_release_fhandle(struct svc_rqst *);
/* Helper functions for NFSv2 ACL code */
__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 335e04aaf7db..80d7da620e91 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -269,71 +269,41 @@ union nfsd3_xdrstore {
#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore)
-int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
-int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *,
- struct nfsd3_sattrargs *);
-int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *,
- struct nfsd3_diropargs *);
-int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *,
- struct nfsd3_accessargs *);
-int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readargs *);
-int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *,
- struct nfsd3_writeargs *);
-int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *,
- struct nfsd3_createargs *);
-int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *,
- struct nfsd3_createargs *);
-int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *,
- struct nfsd3_mknodargs *);
-int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *,
- struct nfsd3_renameargs *);
-int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readlinkargs *);
-int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *,
- struct nfsd3_linkargs *);
-int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd3_symlinkargs *);
-int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readdirargs *);
-int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readdirargs *);
-int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *,
- struct nfsd3_commitargs *);
-int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
-int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *,
- struct nfsd3_attrstat *);
-int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *,
- struct nfsd3_attrstat *);
-int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *,
- struct nfsd3_diropres *);
-int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *,
- struct nfsd3_accessres *);
-int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *,
- struct nfsd3_readlinkres *);
-int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *);
-int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *);
-int nfs3svc_encode_createres(struct svc_rqst *, __be32 *,
- struct nfsd3_diropres *);
-int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *,
- struct nfsd3_renameres *);
-int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *,
- struct nfsd3_linkres *);
-int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *,
- struct nfsd3_readdirres *);
-int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *,
- struct nfsd3_fsstatres *);
-int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *,
- struct nfsd3_fsinfores *);
-int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *,
- struct nfsd3_pathconfres *);
-int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *,
- struct nfsd3_commitres *);
-
-int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *,
- struct nfsd3_attrstat *);
-int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *,
- struct nfsd3_fhandle_pair *);
+int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_readres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_createres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *);
+int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *);
+
+void nfs3svc_release_fhandle(struct svc_rqst *);
+void nfs3svc_release_fhandle2(struct svc_rqst *);
int nfs3svc_encode_entry(void *, const char *name,
int namlen, loff_t offset, u64 ino,
unsigned int);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 8fda4abdf3b1..72c6ad136107 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -539,7 +539,7 @@ struct nfsd4_seek {
struct nfsd4_op {
int opnum;
__be32 status;
- union {
+ union nfsd4_op_u {
struct nfsd4_access access;
struct nfsd4_close close;
struct nfsd4_commit commit;
@@ -577,6 +577,7 @@ struct nfsd4_op {
struct nfsd4_bind_conn_to_session bind_conn_to_session;
struct nfsd4_create_session create_session;
struct nfsd4_destroy_session destroy_session;
+ struct nfsd4_destroy_clientid destroy_clientid;
struct nfsd4_sequence sequence;
struct nfsd4_reclaim_complete reclaim_complete;
struct nfsd4_test_stateid test_stateid;
@@ -585,6 +586,7 @@ struct nfsd4_op {
struct nfsd4_layoutget layoutget;
struct nfsd4_layoutcommit layoutcommit;
struct nfsd4_layoutreturn layoutreturn;
+ struct nfsd4_secinfo_no_name secinfo_no_name;
/* NFSv4.2 */
struct nfsd4_fallocate allocate;
@@ -682,11 +684,9 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
-int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
-int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
- struct nfsd4_compoundargs *);
-int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
- struct nfsd4_compoundres *);
+int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *);
+int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *);
+int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *);
__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
void nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op);
@@ -695,27 +695,26 @@ __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
struct dentry *dentry,
u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_setclientid *setclid);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_setclientid_confirm *setclientid_confirm);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
-extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_backchannel_ctl *);
-extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
+extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *,
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
+extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *,
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_create_session(struct svc_rqst *,
- struct nfsd4_compound_state *,
- struct nfsd4_create_session *);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_sequence(struct svc_rqst *,
- struct nfsd4_compound_state *,
- struct nfsd4_sequence *);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern void nfsd4_sequence_done(struct nfsd4_compoundres *resp);
extern __be32 nfsd4_destroy_session(struct svc_rqst *,
- struct nfsd4_compound_state *,
- struct nfsd4_destroy_session *);
-extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_destroy_clientid *);
-__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_reclaim_complete *);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
+extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *,
+ union nfsd4_op_u *u);
+__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *,
+ union nfsd4_op_u *u);
extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
struct nfsd4_open *open, struct nfsd_net *nn);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
@@ -724,34 +723,29 @@ extern void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate);
extern void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open);
extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
-extern __be32 nfsd4_close(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_close *close);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
+extern __be32 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
+ union nfsd4_op_u *u);
extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_open_downgrade *od);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
- struct nfsd4_lock *lock);
-extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_lockt *lockt);
-extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_locku *locku);
+ union nfsd4_op_u *u);
+extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
+ union nfsd4_op_u *u);
+extern __be32 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
+ union nfsd4_op_u *u);
extern __be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_release_lockowner *rlockowner);
-extern int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
+extern void nfsd4_release_compoundargs(struct svc_rqst *rqstp);
extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
-extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, clientid_t *clid);
+ struct nfsd4_compound_state *, union nfsd4_op_u *u);
+extern __be32 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
+ union nfsd4_op_u *u);
extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_test_stateid *test_stateid);
+ struct nfsd4_compound_state *, union nfsd4_op_u *);
extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid);
+ struct nfsd4_compound_state *, union nfsd4_op_u *);
extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr);
#endif
diff --git a/fs/nsfs.c b/fs/nsfs.c
index f3db56e83dd2..08127a2b8559 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -53,7 +53,6 @@ static void nsfs_evict(struct inode *inode)
static void *__ns_get_path(struct path *path, struct ns_common *ns)
{
struct vfsmount *mnt = nsfs_mnt;
- struct qstr qname = { .name = "", };
struct dentry *dentry;
struct inode *inode;
unsigned long d;
@@ -85,7 +84,7 @@ slow:
inode->i_fop = &ns_file_operations;
inode->i_private = ns;
- dentry = d_alloc_pseudo(mnt->mnt_sb, &qname);
+ dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name);
if (!dentry) {
iput(inode);
return ERR_PTR(-ENOMEM);
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 8c9034ee7383..ee14af9e26f2 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -13,6 +13,7 @@
#include <linux/buffer_head.h>
#include <linux/vmalloc.h>
#include <linux/writeback.h>
+#include <linux/seq_file.h>
#include <linux/crc-itu-t.h>
#include "omfs.h"
@@ -290,12 +291,40 @@ static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int omfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct omfs_sb_info *sbi = OMFS_SB(root->d_sb);
+ umode_t cur_umask = current_umask();
+
+ if (!uid_eq(sbi->s_uid, current_uid()))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, sbi->s_uid));
+ if (!gid_eq(sbi->s_gid, current_gid()))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, sbi->s_gid));
+
+ if (sbi->s_dmask == sbi->s_fmask) {
+ if (sbi->s_fmask != cur_umask)
+ seq_printf(m, ",umask=%o", sbi->s_fmask);
+ } else {
+ if (sbi->s_dmask != cur_umask)
+ seq_printf(m, ",dmask=%o", sbi->s_dmask);
+ if (sbi->s_fmask != cur_umask)
+ seq_printf(m, ",fmask=%o", sbi->s_fmask);
+ }
+
+ return 0;
+}
+
static const struct super_operations omfs_sops = {
.write_inode = omfs_write_inode,
.evict_inode = omfs_evict_inode,
.put_super = omfs_put_super,
.statfs = omfs_statfs,
- .show_options = generic_show_options,
+ .show_options = omfs_show_options,
};
/*
@@ -434,8 +463,6 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root;
int ret = -EINVAL;
- save_mount_options(sb, (char *) data);
-
sbi = kzalloc(sizeof(struct omfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 5c7c273e17ec..5a1bed6c8c6a 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -35,6 +35,19 @@ static const match_table_t tokens = {
uint64_t orangefs_features;
+static int orangefs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb);
+
+ if (root->d_sb->s_flags & MS_POSIXACL)
+ seq_puts(m, ",acl");
+ if (orangefs_sb->flags & ORANGEFS_OPT_INTR)
+ seq_puts(m, ",intr");
+ if (orangefs_sb->flags & ORANGEFS_OPT_LOCAL_LOCK)
+ seq_puts(m, ",local_lock");
+ return 0;
+}
+
static int parse_mount_options(struct super_block *sb, char *options,
int silent)
{
@@ -305,7 +318,7 @@ static const struct super_operations orangefs_s_ops = {
.drop_inode = generic_delete_inode,
.statfs = orangefs_statfs,
.remount_fs = orangefs_remount_fs,
- .show_options = generic_show_options,
+ .show_options = orangefs_show_options,
};
static struct dentry *orangefs_fh_to_dentry(struct super_block *sb,
diff --git a/fs/pipe.c b/fs/pipe.c
index 73b84baf58f8..97e5be897753 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -739,13 +739,12 @@ int create_pipe_files(struct file **res, int flags)
struct inode *inode = get_pipe_inode();
struct file *f;
struct path path;
- static struct qstr name = { .name = "" };
if (!inode)
return -ENFILE;
err = -ENOMEM;
- path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
+ path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
if (!path.dentry)
goto err_inode;
path.mnt = mntget(pipe_mnt);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f1e1927ccd48..719c2e943ea1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1355,6 +1355,49 @@ static const struct file_operations proc_fault_inject_operations = {
.write = proc_fault_inject_write,
.llseek = generic_file_llseek,
};
+
+static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ int err;
+ unsigned int n;
+
+ err = kstrtouint_from_user(buf, count, 0, &n);
+ if (err)
+ return err;
+
+ task = get_proc_task(file_inode(file));
+ if (!task)
+ return -ESRCH;
+ WRITE_ONCE(task->fail_nth, n);
+ put_task_struct(task);
+
+ return count;
+}
+
+static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ char numbuf[PROC_NUMBUF];
+ ssize_t len;
+
+ task = get_proc_task(file_inode(file));
+ if (!task)
+ return -ESRCH;
+ len = snprintf(numbuf, sizeof(numbuf), "%u\n",
+ READ_ONCE(task->fail_nth));
+ len = simple_read_from_buffer(buf, count, ppos, numbuf, len);
+ put_task_struct(task);
+
+ return len;
+}
+
+static const struct file_operations proc_fail_nth_operations = {
+ .read = proc_fail_nth_read,
+ .write = proc_fail_nth_write,
+};
#endif
@@ -2919,6 +2962,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
+ REG("fail-nth", 0644, proc_fail_nth_operations),
#endif
#ifdef CONFIG_ELF_CORE
REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
@@ -3311,6 +3355,7 @@ static const struct pid_entry tid_base_stuff[] = {
#endif
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
+ REG("fail-nth", 0644, proc_fail_nth_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
ONE("io", S_IRUSR, proc_tid_io_accounting),
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c5ae09b6c726..aa2b89071630 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -51,7 +51,7 @@ struct proc_dir_entry {
spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
u8 namelen;
char name[];
-};
+} __randomize_layout;
union proc_op {
int (*proc_get_link)(struct dentry *, struct path *);
@@ -67,10 +67,10 @@ struct proc_inode {
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
struct ctl_table *sysctl_entry;
- struct list_head sysctl_inodes;
+ struct hlist_node sysctl_inodes;
const struct proc_ns_operations *ns_ops;
struct inode vfs_inode;
-};
+} __randomize_layout;
/*
* General functions
@@ -279,7 +279,7 @@ struct proc_maps_private {
#ifdef CONFIG_NUMA
struct mempolicy *task_mempolicy;
#endif
-};
+} __randomize_layout;
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 67985a7233c2..8f479229b349 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -191,7 +191,7 @@ static void init_header(struct ctl_table_header *head,
head->set = set;
head->parent = NULL;
head->node = node;
- INIT_LIST_HEAD(&head->inodes);
+ INIT_HLIST_HEAD(&head->inodes);
if (node) {
struct ctl_table *entry;
for (entry = table; entry->procname; entry++, node++)
@@ -261,25 +261,42 @@ static void unuse_table(struct ctl_table_header *p)
complete(p->unregistering);
}
-/* called under sysctl_lock */
static void proc_sys_prune_dcache(struct ctl_table_header *head)
{
- struct inode *inode, *prev = NULL;
+ struct inode *inode;
struct proc_inode *ei;
+ struct hlist_node *node;
+ struct super_block *sb;
rcu_read_lock();
- list_for_each_entry_rcu(ei, &head->inodes, sysctl_inodes) {
- inode = igrab(&ei->vfs_inode);
- if (inode) {
- rcu_read_unlock();
- iput(prev);
- prev = inode;
- d_prune_aliases(inode);
+ for (;;) {
+ node = hlist_first_rcu(&head->inodes);
+ if (!node)
+ break;
+ ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
+ spin_lock(&sysctl_lock);
+ hlist_del_init_rcu(&ei->sysctl_inodes);
+ spin_unlock(&sysctl_lock);
+
+ inode = &ei->vfs_inode;
+ sb = inode->i_sb;
+ if (!atomic_inc_not_zero(&sb->s_active))
+ continue;
+ inode = igrab(inode);
+ rcu_read_unlock();
+ if (unlikely(!inode)) {
+ deactivate_super(sb);
rcu_read_lock();
+ continue;
}
+
+ d_prune_aliases(inode);
+ iput(inode);
+ deactivate_super(sb);
+
+ rcu_read_lock();
}
rcu_read_unlock();
- iput(prev);
}
/* called under sysctl_lock, will reacquire if has to wait */
@@ -461,7 +478,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
}
ei->sysctl = head;
ei->sysctl_entry = table;
- list_add_rcu(&ei->sysctl_inodes, &head->inodes);
+ hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
head->count++;
spin_unlock(&sysctl_lock);
@@ -489,7 +506,7 @@ out:
void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
{
spin_lock(&sysctl_lock);
- list_del_rcu(&PROC_I(inode)->sysctl_inodes);
+ hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
if (!--head->count)
kfree_rcu(head, rcu);
spin_unlock(&sysctl_lock);
@@ -1061,16 +1078,30 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
return -EINVAL;
}
+static int sysctl_check_table_array(const char *path, struct ctl_table *table)
+{
+ int err = 0;
+
+ if ((table->proc_handler == proc_douintvec) ||
+ (table->proc_handler == proc_douintvec_minmax)) {
+ if (table->maxlen != sizeof(unsigned int))
+ err |= sysctl_err(path, table, "array now allowed");
+ }
+
+ return err;
+}
+
static int sysctl_check_table(const char *path, struct ctl_table *table)
{
int err = 0;
for (; table->procname; table++) {
if (table->child)
- err = sysctl_err(path, table, "Not a file");
+ err |= sysctl_err(path, table, "Not a file");
if ((table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
(table->proc_handler == proc_douintvec) ||
+ (table->proc_handler == proc_douintvec_minmax) ||
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
@@ -1078,15 +1109,17 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
(table->proc_handler == proc_doulongvec_minmax) ||
(table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
if (!table->data)
- err = sysctl_err(path, table, "No data");
+ err |= sysctl_err(path, table, "No data");
if (!table->maxlen)
- err = sysctl_err(path, table, "No maxlen");
+ err |= sysctl_err(path, table, "No maxlen");
+ else
+ err |= sysctl_check_table_array(path, table);
}
if (!table->proc_handler)
- err = sysctl_err(path, table, "No proc_handler");
+ err |= sysctl_err(path, table, "No proc_handler");
if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode)
- err = sysctl_err(path, table, "bogus .mode 0%o",
+ err |= sysctl_err(path, table, "bogus .mode 0%o",
table->mode);
}
return err;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 4d02c3b65061..fefd22611cf6 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -283,6 +283,16 @@ static void parse_options(char *options)
}
}
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int pstore_show_options(struct seq_file *m, struct dentry *root)
+{
+ if (kmsg_bytes != PSTORE_DEFAULT_KMSG_BYTES)
+ seq_printf(m, ",kmsg_bytes=%lu", kmsg_bytes);
+ return 0;
+}
+
static int pstore_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
@@ -296,7 +306,7 @@ static const struct super_operations pstore_ops = {
.drop_inode = generic_delete_inode,
.evict_inode = pstore_evict_inode,
.remount_fs = pstore_remount,
- .show_options = generic_show_options,
+ .show_options = pstore_show_options,
};
static struct super_block *pstore_sb;
@@ -448,8 +458,6 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
- save_mount_options(sb, data);
-
pstore_sb = sb;
sb->s_maxbytes = MAX_LFS_FILESIZE;
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 58051265626f..7f4e48c8d188 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -5,6 +5,9 @@
#include <linux/time.h>
#include <linux/pstore.h>
+#define PSTORE_DEFAULT_KMSG_BYTES 10240
+extern unsigned long kmsg_bytes;
+
#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_register_ftrace(void);
extern void pstore_unregister_ftrace(void);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 1b6e0ff6bff5..2b21d180157c 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -99,7 +99,7 @@ static char *big_oops_buf;
static size_t big_oops_buf_sz;
/* How much of the console log to snapshot */
-static unsigned long kmsg_bytes = 10240;
+unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES;
void pstore_set_kmsg_bytes(int bytes)
{
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 26e45863e499..11201b2d06b9 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -38,6 +38,14 @@
#include <linux/uaccess.h>
#include "internal.h"
+struct ramfs_mount_opts {
+ umode_t mode;
+};
+
+struct ramfs_fs_info {
+ struct ramfs_mount_opts mount_opts;
+};
+
#define RAMFS_DEFAULT_MODE 0755
static const struct super_operations ramfs_ops;
@@ -149,14 +157,22 @@ static const struct inode_operations ramfs_dir_inode_operations = {
.rename = simple_rename,
};
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int ramfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct ramfs_fs_info *fsi = root->d_sb->s_fs_info;
+
+ if (fsi->mount_opts.mode != RAMFS_DEFAULT_MODE)
+ seq_printf(m, ",mode=%o", fsi->mount_opts.mode);
+ return 0;
+}
+
static const struct super_operations ramfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
- .show_options = generic_show_options,
-};
-
-struct ramfs_mount_opts {
- umode_t mode;
+ .show_options = ramfs_show_options,
};
enum {
@@ -169,10 +185,6 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
-struct ramfs_fs_info {
- struct ramfs_mount_opts mount_opts;
-};
-
static int ramfs_parse_options(char *data, struct ramfs_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
@@ -211,8 +223,6 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode;
int err;
- save_mount_options(sb, data);
-
fsi = kzalloc(sizeof(struct ramfs_fs_info), GFP_KERNEL);
sb->s_fs_info = fsi;
if (!fsi)
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index dc198bc64c61..edc8ef78b63f 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
"inode has negative prealloc blocks count.");
#endif
while (ei->i_prealloc_count > 0) {
- reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
- ei->i_prealloc_block++;
+ b_blocknr_t block_to_free;
+
+ /*
+ * reiserfs_free_prealloc_block can drop the write lock,
+ * which could allow another caller to free the same block.
+ * We can protect against it by modifying the prealloc
+ * state before calling it.
+ */
+ block_to_free = ei->i_prealloc_block++;
ei->i_prealloc_count--;
+ reiserfs_free_prealloc_block(th, inode, block_to_free);
dirty = 1;
}
if (dirty)
@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
hint->prealloc_size = 0;
if (!hint->formatted_node && hint->preallocate) {
- if (S_ISREG(hint->inode->i_mode)
+ if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
&& hint->inode->i_size >=
REISERFS_SB(hint->th->t_super)->s_alloc_options.
preallocmin * hint->inode->i_sb->s_blocksize)
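
[Editor's note] The comment in the hunk above describes a classic lock-drop race: reiserfs_free_prealloc_block() may release the write lock, so the shared prealloc cursor is advanced before the call rather than after. A minimal pthread sketch of the same pattern follows; every name in it is invented for illustration.

/* lock_drop_demo.c -- invented example of "consume state before a
 * callee that may drop the lock"; build with -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_block = 100;
static int count = 2;

/* Stand-in for reiserfs_free_prealloc_block(): may drop the lock. */
static void free_block(unsigned long block)
{
	pthread_mutex_unlock(&lock);   /* e.g. to wait for I/O */
	printf("freeing block %lu\n", block);
	pthread_mutex_lock(&lock);
}

static void *discard(void *arg)
{
	pthread_mutex_lock(&lock);
	while (count > 0) {
		/* Advance the shared cursor first... */
		unsigned long block_to_free = next_block++;
		count--;
		/* ...only then call the function that may drop the lock,
		 * so no other thread can pick up the same block. */
		free_block(block_to_free);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, discard, NULL);
	pthread_create(&b, NULL, discard, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
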
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 685f1e056998..306e4e9d172d 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1599,8 +1599,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
}
out_ok_unlocked:
- if (new_opts)
- replace_mount_options(s, new_opts);
return 0;
out_err_unlock:
@@ -1916,8 +1914,6 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
char *qf_names[REISERFS_MAXQUOTAS] = {};
unsigned int qfmt = 0;
- save_mount_options(s, data);
-
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 3d2256a425ee..54415f0e3d18 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -23,7 +23,8 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
struct reiserfs_transaction_handle th;
size_t jcreate_blocks;
int size = acl ? posix_acl_xattr_size(acl->a_count) : 0;
-
+ int update_mode = 0;
+ umode_t mode = inode->i_mode;
/*
* Pessimism: We can't assume that anything from the xattr root up
@@ -37,7 +38,16 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
error = journal_begin(&th, inode->i_sb, jcreate_blocks);
reiserfs_write_unlock(inode->i_sb);
if (error == 0) {
+ if (type == ACL_TYPE_ACCESS && acl) {
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error)
+ goto unlock;
+ update_mode = 1;
+ }
error = __reiserfs_set_acl(&th, inode, type, acl);
+ if (!error && update_mode)
+ inode->i_mode = mode;
+unlock:
reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
@@ -241,11 +251,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (error)
- return error;
- }
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
diff --git a/fs/super.c b/fs/super.c
index adb0c0de428c..6bc3352adcf3 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -168,7 +168,6 @@ static void destroy_super(struct super_block *s)
WARN_ON(!list_empty(&s->s_mounts));
put_user_ns(s->s_user_ns);
kfree(s->s_subtype);
- kfree(s->s_options);
call_rcu(&s->rcu, destroy_super_rcu);
}
@@ -508,7 +507,7 @@ retry:
return ERR_PTR(-ENOMEM);
goto retry;
}
-
+
err = set(s, data);
if (err) {
spin_unlock(&sb_lock);
@@ -771,7 +770,7 @@ restart:
spin_unlock(&sb_lock);
return NULL;
}
-
+
struct super_block *user_get_super(dev_t dev)
{
struct super_block *sb;
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 328e89c2cf83..bea8ad876bf9 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -270,8 +270,6 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)
struct tracefs_fs_info *fsi;
int err;
- save_mount_options(sb, data);
-
fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL);
sb->s_fs_info = fsi;
if (!fsi) {
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index 382ed428cfd2..114ba455bac3 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -9,8 +9,13 @@ static int ubifs_crypt_get_context(struct inode *inode, void *ctx, size_t len)
static int ubifs_crypt_set_context(struct inode *inode, const void *ctx,
size_t len, void *fs_data)
{
+ /*
+ * Creating an encryption context is done unlocked since we
+ * operate on a new inode which is not visible to other users
+ * at this point. So, no need to check whether inode is locked.
+ */
return ubifs_xattr_set(inode, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT,
- ctx, len, 0);
+ ctx, len, 0, false);
}
static bool ubifs_crypt_empty_dir(struct inode *inode)
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 566079d9b402..417fe0b29f23 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -143,6 +143,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
case S_IFBLK:
case S_IFCHR:
inode->i_op = &ubifs_file_inode_operations;
+ encrypted = false;
break;
default:
BUG();
@@ -1061,7 +1062,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
int sz_change;
int err, devlen = 0;
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
- .new_ino_d = ALIGN(devlen, 8),
.dirtied_ino = 1 };
struct fscrypt_name nm;
@@ -1079,6 +1079,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
devlen = ubifs_encode_dev(dev, rdev);
}
+ req.new_ino_d = ALIGN(devlen, 8);
err = ubifs_budget_space(c, &req);
if (err) {
kfree(dev);
@@ -1396,17 +1397,14 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
dev = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS);
if (!dev) {
- ubifs_release_budget(c, &req);
- ubifs_release_budget(c, &ino_req);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_release;
}
err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
if (err) {
- ubifs_release_budget(c, &req);
- ubifs_release_budget(c, &ino_req);
kfree(dev);
- return err;
+ goto out_release;
}
whiteout->i_state |= I_LINKABLE;
@@ -1494,12 +1492,10 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
err = ubifs_budget_space(c, &wht_req);
if (err) {
- ubifs_release_budget(c, &req);
- ubifs_release_budget(c, &ino_req);
kfree(whiteout_ui->data);
whiteout_ui->data_len = 0;
iput(whiteout);
- return err;
+ goto out_release;
}
inc_nlink(whiteout);
@@ -1554,6 +1550,7 @@ out_cancel:
iput(whiteout);
}
unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
+out_release:
ubifs_release_budget(c, &ino_req);
ubifs_release_budget(c, &req);
fscrypt_free_filename(&old_nm);
@@ -1647,6 +1644,21 @@ int ubifs_getattr(const struct path *path, struct kstat *stat,
struct ubifs_inode *ui = ubifs_inode(inode);
mutex_lock(&ui->ui_mutex);
+
+ if (ui->flags & UBIFS_APPEND_FL)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (ui->flags & UBIFS_COMPR_FL)
+ stat->attributes |= STATX_ATTR_COMPRESSED;
+ if (ui->flags & UBIFS_CRYPT_FL)
+ stat->attributes |= STATX_ATTR_ENCRYPTED;
+ if (ui->flags & UBIFS_IMMUTABLE_FL)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_COMPRESSED |
+ STATX_ATTR_ENCRYPTED |
+ STATX_ATTR_IMMUTABLE);
+
generic_fillattr(inode, stat);
stat->blksize = UBIFS_BLOCK_SIZE;
stat->size = ui->ui_size;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 2cda3d67e2d0..8cad0b19b404 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -735,6 +735,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
int err, page_idx, page_cnt, ret = 0, n = 0;
int allocate = bu->buf ? 0 : 1;
loff_t isize;
+ gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
err = ubifs_tnc_get_bu_keys(c, bu);
if (err)
@@ -796,8 +797,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
if (page_offset > end_index)
break;
- page = find_or_create_page(mapping, page_offset,
- GFP_NOFS | __GFP_COLD);
+ page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
if (!page)
break;
if (!PageUptodate(page))
@@ -1284,6 +1284,14 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
+ if (ubifs_crypt_is_encrypted(inode) && (attr->ia_valid & ATTR_SIZE)) {
+ err = fscrypt_get_encryption_info(inode);
+ if (err)
+ return err;
+ if (!fscrypt_has_encryption_key(inode))
+ return -ENOKEY;
+ }
+
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
/* Truncation to a smaller size */
err = do_truncation(c, inode, attr);
@@ -1607,15 +1615,6 @@ static const struct vm_operations_struct ubifs_file_vm_ops = {
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int err;
- struct inode *inode = file->f_mapping->host;
-
- if (ubifs_crypt_is_encrypted(inode)) {
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return -EACCES;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
err = generic_file_mmap(file, vma);
if (err)
@@ -1698,12 +1697,6 @@ static const char *ubifs_get_link(struct dentry *dentry,
pstr.name[pstr.len] = '\0';
- // XXX this probably won't happen anymore...
- if (pstr.name[0] == '\0') {
- fscrypt_fname_free_buffer(&pstr);
- return ERR_PTR(-ENOENT);
- }
-
set_delayed_call(done, kfree_link, pstr.name);
return pstr.name;
}
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 294519b98874..04c4ec6483e5 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -549,8 +549,6 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
struct ubifs_ino_node *ino;
union ubifs_key dent_key, ino_key;
- //dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
- // inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
@@ -574,7 +572,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
/* Make sure to also account for extended attributes */
len += host_ui->data_len;
- dent = kmalloc(len, GFP_NOFS);
+ dent = kzalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -585,7 +583,10 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
if (!xent) {
dent->ch.node_type = UBIFS_DENT_NODE;
- dent_key_init(c, &dent_key, dir->i_ino, nm);
+ if (nm->hash)
+ dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
+ else
+ dent_key_init(c, &dent_key, dir->i_ino, nm);
} else {
dent->ch.node_type = UBIFS_XENT_NODE;
xent_key_init(c, &dent_key, dir->i_ino, nm);
@@ -629,7 +630,10 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
kfree(dent);
if (deletion) {
- err = ubifs_tnc_remove_nm(c, &dent_key, nm);
+ if (nm->hash)
+ err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
+ else
+ err = ubifs_tnc_remove_nm(c, &dent_key, nm);
if (err)
goto out_ro;
err = ubifs_add_dirt(c, lnum, dlen);
@@ -950,9 +954,6 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
int twoparents = (fst_dir != snd_dir);
void *p;
- //dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
- // fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);
-
ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
@@ -967,7 +968,7 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
if (twoparents)
len += plen;
- dent1 = kmalloc(len, GFP_NOFS);
+ dent1 = kzalloc(len, GFP_NOFS);
if (!dent1)
return -ENOMEM;
@@ -984,6 +985,7 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
dent1->nlen = cpu_to_le16(fname_len(snd_nm));
memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
dent1->name[fname_len(snd_nm)] = '\0';
+ set_dent_cookie(c, dent1);
zero_dent_node_unused(dent1);
ubifs_prep_grp_node(c, dent1, dlen1, 0);
@@ -996,6 +998,7 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
dent2->nlen = cpu_to_le16(fname_len(fst_nm));
memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
dent2->name[fname_len(fst_nm)] = '\0';
+ set_dent_cookie(c, dent2);
zero_dent_node_unused(dent2);
ubifs_prep_grp_node(c, dent2, dlen2, 0);
@@ -1094,8 +1097,6 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
int move = (old_dir != new_dir);
struct ubifs_inode *uninitialized_var(new_ui);
- //dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
- // old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
@@ -1117,7 +1118,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
if (move)
len += plen;
- dent = kmalloc(len, GFP_NOFS);
+ dent = kzalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -1298,7 +1299,9 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
goto out;
}
- if (compr_type != UBIFS_COMPR_NONE) {
+ if (compr_type == UBIFS_COMPR_NONE) {
+ out_len = *new_len;
+ } else {
err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
if (err)
goto out;
@@ -1485,9 +1488,6 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
int sync = IS_DIRSYNC(host);
struct ubifs_inode *host_ui = ubifs_inode(host);
- //dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
- // host->i_ino, inode->i_ino, nm->name,
- // ubifs_inode(inode)->data_len);
ubifs_assert(inode->i_nlink == 0);
ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
@@ -1500,7 +1500,7 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
- xent = kmalloc(len, GFP_NOFS);
+ xent = kzalloc(len, GFP_NOFS);
if (!xent)
return -ENOMEM;
@@ -1607,7 +1607,7 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
aligned_len1 = ALIGN(len1, 8);
aligned_len = aligned_len1 + ALIGN(len2, 8);
- ino = kmalloc(aligned_len, GFP_NOFS);
+ ino = kzalloc(aligned_len, GFP_NOFS);
if (!ino)
return -ENOMEM;
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 7547be512db2..b1f7c0caa3ac 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -162,6 +162,7 @@ static inline void dent_key_init(const struct ubifs_info *c,
uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ ubifs_assert(!nm->hash && !nm->minor_hash);
key->u32[0] = inum;
key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS);
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index cf4cc99b75b5..bffadbb67e47 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -45,7 +45,7 @@
#define UBIFS_KMALLOC_OK (128*1024)
/* Slab cache for UBIFS inodes */
-struct kmem_cache *ubifs_inode_slab;
+static struct kmem_cache *ubifs_inode_slab;
/* UBIFS TNC shrinker description */
static struct shrinker ubifs_shrinker_info = {
@@ -446,6 +446,8 @@ static int ubifs_show_options(struct seq_file *s, struct dentry *root)
ubifs_compr_name(c->mount_opts.compr_type));
}
+ seq_printf(s, ",ubi=%d,vol=%d", c->vi.ubi_num, c->vi.vol_id);
+
return 0;
}
@@ -931,6 +933,7 @@ enum {
Opt_chk_data_crc,
Opt_no_chk_data_crc,
Opt_override_compr,
+ Opt_ignore,
Opt_err,
};
@@ -942,6 +945,8 @@ static const match_table_t tokens = {
{Opt_chk_data_crc, "chk_data_crc"},
{Opt_no_chk_data_crc, "no_chk_data_crc"},
{Opt_override_compr, "compr=%s"},
+ {Opt_ignore, "ubi=%s"},
+ {Opt_ignore, "vol=%s"},
{Opt_err, NULL},
};
@@ -1042,6 +1047,8 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
c->default_compr = c->mount_opts.compr_type;
break;
}
+ case Opt_ignore:
+ break;
default:
{
unsigned long flag;
@@ -1869,8 +1876,10 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
bu_init(c);
else {
dbg_gen("disable bulk-read");
+ mutex_lock(&c->bu_mutex);
kfree(c->bu.buf);
c->bu.buf = NULL;
+ mutex_unlock(&c->bu_mutex);
}
ubifs_assert(c->lst.taken_empty_lebs > 0);
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 709aa098dd46..0a213dcba2a1 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1812,7 +1812,7 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
int found, n, err;
struct ubifs_znode *znode;
- //dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
+ dbg_tnck(key, "key ");
mutex_lock(&c->tnc_mutex);
found = ubifs_lookup_level0(c, key, &znode, &n);
if (!found) {
@@ -1880,48 +1880,65 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
return do_lookup_nm(c, key, node, nm);
}
-static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
- struct ubifs_dent_node *dent, uint32_t cookie)
+static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_dent_node *dent, uint32_t cookie,
+ struct ubifs_znode **zn, int *n)
{
- int n, err, type = key_type(c, key);
- struct ubifs_znode *znode;
+ int err;
+ struct ubifs_znode *znode = *zn;
struct ubifs_zbranch *zbr;
- union ubifs_key *dkey, start_key;
-
- ubifs_assert(is_hash_key(c, key));
-
- lowest_dent_key(c, &start_key, key_inum(c, key));
-
- mutex_lock(&c->tnc_mutex);
- err = ubifs_lookup_level0(c, &start_key, &znode, &n);
- if (unlikely(err < 0))
- goto out_unlock;
+ union ubifs_key *dkey;
for (;;) {
if (!err) {
- err = tnc_next(c, &znode, &n);
+ err = tnc_next(c, &znode, n);
if (err)
- goto out_unlock;
+ goto out;
}
- zbr = &znode->zbranch[n];
+ zbr = &znode->zbranch[*n];
dkey = &zbr->key;
if (key_inum(c, dkey) != key_inum(c, key) ||
- key_type(c, dkey) != type) {
+ key_type(c, dkey) != key_type(c, key)) {
err = -ENOENT;
- goto out_unlock;
+ goto out;
}
err = tnc_read_hashed_node(c, zbr, dent);
if (err)
- goto out_unlock;
+ goto out;
if (key_hash(c, key) == key_hash(c, dkey) &&
- le32_to_cpu(dent->cookie) == cookie)
- goto out_unlock;
+ le32_to_cpu(dent->cookie) == cookie) {
+ *zn = znode;
+ goto out;
+ }
}
+out:
+
+ return err;
+}
+
+static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_dent_node *dent, uint32_t cookie)
+{
+ int n, err;
+ struct ubifs_znode *znode;
+ union ubifs_key start_key;
+
+ ubifs_assert(is_hash_key(c, key));
+
+ lowest_dent_key(c, &start_key, key_inum(c, key));
+
+ mutex_lock(&c->tnc_mutex);
+ err = ubifs_lookup_level0(c, &start_key, &znode, &n);
+ if (unlikely(err < 0))
+ goto out_unlock;
+
+ err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
+
out_unlock:
mutex_unlock(&c->tnc_mutex);
return err;
@@ -2393,8 +2410,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
struct ubifs_znode *znode;
mutex_lock(&c->tnc_mutex);
- //dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
- // lnum, offs, nm->len, nm->name);
+ dbg_tnck(key, "LEB %d:%d, key ", lnum, offs);
found = lookup_level0_dirty(c, key, &znode, &n);
if (found < 0) {
err = found;
@@ -2628,7 +2644,7 @@ int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
struct ubifs_znode *znode;
mutex_lock(&c->tnc_mutex);
- //dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
+ dbg_tnck(key, "key ");
err = lookup_level0_dirty(c, key, &znode, &n);
if (err < 0)
goto out_unlock;
@@ -2663,6 +2679,74 @@ out_unlock:
}
/**
+ * ubifs_tnc_remove_dh - remove an index entry for a "double hashed" node.
+ * @c: UBIFS file-system description object
+ * @key: key of node
+ * @cookie: node cookie for collision resolution
+ *
+ * Returns %0 on success or negative error code on failure.
+ */
+int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
+ uint32_t cookie)
+{
+ int n, err;
+ struct ubifs_znode *znode;
+ struct ubifs_dent_node *dent;
+ struct ubifs_zbranch *zbr;
+
+ if (!c->double_hash)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&c->tnc_mutex);
+ err = lookup_level0_dirty(c, key, &znode, &n);
+ if (err <= 0)
+ goto out_unlock;
+
+ zbr = &znode->zbranch[n];
+ dent = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
+ if (!dent) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ err = tnc_read_hashed_node(c, zbr, dent);
+ if (err)
+ goto out_free;
+
+ /* If the cookie does not match, we're facing a hash collision. */
+ if (le32_to_cpu(dent->cookie) != cookie) {
+ union ubifs_key start_key;
+
+ lowest_dent_key(c, &start_key, key_inum(c, key));
+
+ err = ubifs_lookup_level0(c, &start_key, &znode, &n);
+ if (unlikely(err < 0))
+ goto out_free;
+
+ err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
+ if (err)
+ goto out_free;
+ }
+
+ if (znode->cnext || !ubifs_zn_dirty(znode)) {
+ znode = dirty_cow_bottom_up(c, znode);
+ if (IS_ERR(znode)) {
+ err = PTR_ERR(znode);
+ goto out_free;
+ }
+ }
+ err = tnc_delete(c, znode, n);
+
+out_free:
+ kfree(dent);
+out_unlock:
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
* key_in_range - determine if a key falls within a range of keys.
* @c: UBIFS file-system description object
* @key: key to check
@@ -2802,6 +2886,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
dbg_tnc("xent '%s', ino %lu", xent->name,
(unsigned long)xattr_inum);
+ ubifs_evict_xattr_inode(c, xattr_inum);
+
fname_name(&nm) = xent->name;
fname_len(&nm) = le16_to_cpu(xent->nlen);
err = ubifs_tnc_remove_nm(c, &key1, &nm);
@@ -2863,7 +2949,7 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
struct ubifs_zbranch *zbr;
union ubifs_key *dkey;
- //dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
+ dbg_tnck(key, "key ");
ubifs_assert(is_hash_key(c, key));
mutex_lock(&c->tnc_mutex);
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 51157da3f76e..aa31f60220ef 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -57,6 +57,8 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
ubifs_dump_znode(c, znode);
if (zbr->znode)
ubifs_dump_znode(c, zbr->znode);
+
+ return -EINVAL;
}
}
ubifs_prepare_node(c, idx, len, 0);
@@ -859,6 +861,8 @@ static int write_index(struct ubifs_info *c)
ubifs_dump_znode(c, znode);
if (zbr->znode)
ubifs_dump_znode(c, zbr->znode);
+
+ return -EINVAL;
}
}
len = ubifs_idx_node_sz(c, znode->child_cnt);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 298b4d89eee9..cd43651f1731 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1451,7 +1451,6 @@ struct ubifs_info {
extern struct list_head ubifs_infos;
extern spinlock_t ubifs_infos_lock;
extern atomic_long_t ubifs_clean_zn_cnt;
-extern struct kmem_cache *ubifs_inode_slab;
extern const struct super_operations ubifs_super_operations;
extern const struct address_space_operations ubifs_file_address_operations;
extern const struct file_operations ubifs_file_operations;
@@ -1590,6 +1589,8 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key);
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
const struct fscrypt_name *nm);
+int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
+ uint32_t cookie);
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
union ubifs_key *to_key);
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum);
@@ -1754,9 +1755,10 @@ int ubifs_check_dir_empty(struct inode *dir);
extern const struct xattr_handler *ubifs_xattr_handlers[];
ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
- size_t size, int flags);
+ size_t size, int flags, bool check_lock);
ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
size_t size);
+void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum);
#ifdef CONFIG_UBIFS_FS_SECURITY
extern int ubifs_init_security(struct inode *dentry, struct inode *inode,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c9e62c2ef55..c13eae819cbc 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -280,7 +280,7 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum)
}
int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
- size_t size, int flags)
+ size_t size, int flags, bool check_lock)
{
struct inode *inode;
struct ubifs_info *c = host->i_sb->s_fs_info;
@@ -289,12 +289,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
union ubifs_key key;
int err;
- /*
- * Creating an encryption context is done unlocked since we
- * operate on a new inode which is not visible to other users
- * at this point.
- */
- if (strcmp(name, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT) != 0)
+ if (check_lock)
ubifs_assert(inode_is_locked(host));
if (size > UBIFS_MAX_INO_DATA)
@@ -513,6 +508,28 @@ out_cancel:
return err;
}
+/**
+ * ubifs_evict_xattr_inode - Evict an xattr inode.
+ * @c: UBIFS file-system description object
+ * @xattr_inum: xattr inode number
+ *
+ * When an inode that hosts xattrs is being removed, we have to make sure
+ * that the cached inodes of those xattrs are also removed from the inode
+ * cache; otherwise we'd waste memory. This function looks up an inode in
+ * the inode cache and clears its link counter so that iput() will evict
+ * the inode.
+ */
+void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum)
+{
+ struct inode *inode;
+
+ inode = ilookup(c->vfs_sb, xattr_inum);
+ if (inode) {
+ clear_nlink(inode);
+ iput(inode);
+ }
+}
+
static int ubifs_xattr_remove(struct inode *host, const char *name)
{
struct inode *inode;
@@ -576,8 +593,12 @@ static int init_xattrs(struct inode *inode, const struct xattr *xattr_array,
}
strcpy(name, XATTR_SECURITY_PREFIX);
strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ /*
+ * We are creating a new inode without holding the inode rwsem,
+ * so there is no need to check whether the inode is locked.
+ */
err = ubifs_xattr_set(inode, name, xattr->value,
- xattr->value_len, 0);
+ xattr->value_len, 0, false);
kfree(name);
if (err < 0)
break;
@@ -624,7 +645,7 @@ static int xattr_set(const struct xattr_handler *handler,
name = xattr_full_name(handler, name);
if (value)
- return ubifs_xattr_set(inode, name, value, size, flags);
+ return ubifs_xattr_set(inode, name, value, size, flags, true);
else
return ubifs_xattr_remove(inode, name);
}
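For context: the clear_nlink() + iput() pairing in ubifs_evict_xattr_inode() above works because the default VFS drop logic frees, rather than caches, an inode whose link count has reached zero. The generic helper from include/linux/fs.h (quoted here for reference, not part of this patch) makes that visible:

static inline int generic_drop_inode(struct inode *inode)
{
	/* evict instead of caching: no links left, or never hashed */
	return !inode->i_nlink || inode_unhashed(inode);
}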
diff --git a/fs/udf/file.c b/fs/udf/file.c
index f5eb2d5b3bac..356c2bf148a5 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -43,10 +43,15 @@ static void __udf_adinicb_readpage(struct page *page)
struct inode *inode = page->mapping->host;
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
+ loff_t isize = i_size_read(inode);
+ /*
+ * We have to be careful here as truncate can change i_size under us.
+ * So just sample it once and use the same value everywhere.
+ */
kaddr = kmap_atomic(page);
- memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
- memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
+ memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, isize);
+ memset(kaddr + isize, 0, PAGE_SIZE - isize);
flush_dcache_page(page);
SetPageUptodate(page);
kunmap_atomic(kaddr);
@@ -71,7 +76,8 @@ static int udf_adinicb_writepage(struct page *page,
BUG_ON(!PageLocked(page));
kaddr = kmap_atomic(page);
- memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size);
+ memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
+ i_size_read(inode));
SetPageUptodate(page);
kunmap_atomic(kaddr);
mark_inode_dirty(inode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 98c510e17203..18fdb9d90812 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1222,8 +1222,8 @@ int udf_setsize(struct inode *inode, loff_t newsize)
return err;
}
set_size:
- truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
+ truncate_setsize(inode, newsize);
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
@@ -1240,9 +1240,9 @@ set_size:
udf_get_block);
if (err)
return err;
+ truncate_setsize(inode, newsize);
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
- truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 14b4bc1f6801..462ac2e9258c 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -73,8 +73,6 @@
#define VDS_POS_TERMINATING_DESC 6
#define VDS_POS_LENGTH 7
-#define UDF_DEFAULT_BLOCKSIZE 2048
-
#define VSD_FIRST_SECTOR_OFFSET 32768
#define VSD_MAX_SECTOR_OFFSET 0x800000
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 77c331f1a770..14626b34d13e 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -38,56 +38,11 @@
#include <linux/types.h>
#include <linux/kernel.h>
-
-#define EPOCH_YEAR 1970
-
-#ifndef __isleap
-/* Nonzero if YEAR is a leap year (every 4 years,
- except every 100th isn't, and every 400th is). */
-#define __isleap(year) \
- ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
-#endif
-
-/* How many days come before each month (0-12). */
-static const unsigned short int __mon_yday[2][13] = {
- /* Normal years. */
- {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
- /* Leap years. */
- {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
-};
-
-#define MAX_YEAR_SECONDS 69
-#define SPD 0x15180 /*3600*24 */
-#define SPY(y, l, s) (SPD * (365 * y + l) + s)
-
-static time_t year_seconds[MAX_YEAR_SECONDS] = {
-/*1970*/ SPY(0, 0, 0), SPY(1, 0, 0), SPY(2, 0, 0), SPY(3, 1, 0),
-/*1974*/ SPY(4, 1, 0), SPY(5, 1, 0), SPY(6, 1, 0), SPY(7, 2, 0),
-/*1978*/ SPY(8, 2, 0), SPY(9, 2, 0), SPY(10, 2, 0), SPY(11, 3, 0),
-/*1982*/ SPY(12, 3, 0), SPY(13, 3, 0), SPY(14, 3, 0), SPY(15, 4, 0),
-/*1986*/ SPY(16, 4, 0), SPY(17, 4, 0), SPY(18, 4, 0), SPY(19, 5, 0),
-/*1990*/ SPY(20, 5, 0), SPY(21, 5, 0), SPY(22, 5, 0), SPY(23, 6, 0),
-/*1994*/ SPY(24, 6, 0), SPY(25, 6, 0), SPY(26, 6, 0), SPY(27, 7, 0),
-/*1998*/ SPY(28, 7, 0), SPY(29, 7, 0), SPY(30, 7, 0), SPY(31, 8, 0),
-/*2002*/ SPY(32, 8, 0), SPY(33, 8, 0), SPY(34, 8, 0), SPY(35, 9, 0),
-/*2006*/ SPY(36, 9, 0), SPY(37, 9, 0), SPY(38, 9, 0), SPY(39, 10, 0),
-/*2010*/ SPY(40, 10, 0), SPY(41, 10, 0), SPY(42, 10, 0), SPY(43, 11, 0),
-/*2014*/ SPY(44, 11, 0), SPY(45, 11, 0), SPY(46, 11, 0), SPY(47, 12, 0),
-/*2018*/ SPY(48, 12, 0), SPY(49, 12, 0), SPY(50, 12, 0), SPY(51, 13, 0),
-/*2022*/ SPY(52, 13, 0), SPY(53, 13, 0), SPY(54, 13, 0), SPY(55, 14, 0),
-/*2026*/ SPY(56, 14, 0), SPY(57, 14, 0), SPY(58, 14, 0), SPY(59, 15, 0),
-/*2030*/ SPY(60, 15, 0), SPY(61, 15, 0), SPY(62, 15, 0), SPY(63, 16, 0),
-/*2034*/ SPY(64, 16, 0), SPY(65, 16, 0), SPY(66, 16, 0), SPY(67, 17, 0),
-/*2038*/ SPY(68, 17, 0)
-};
-
-#define SECS_PER_HOUR (60 * 60)
-#define SECS_PER_DAY (SECS_PER_HOUR * 24)
+#include <linux/time.h>
struct timespec *
udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src)
{
- int yday;
u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
u16 year = le16_to_cpu(src.year);
uint8_t type = typeAndTimezone >> 12;
@@ -102,15 +57,9 @@ udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src)
} else
offset = 0;
- if ((year < EPOCH_YEAR) ||
- (year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
- return NULL;
- }
- dest->tv_sec = year_seconds[year - EPOCH_YEAR];
+ dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
+ src.second);
dest->tv_sec -= offset * 60;
-
- yday = ((__mon_yday[__isleap(year)][src.month - 1]) + src.day - 1);
- dest->tv_sec += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
src.hundredsOfMicroseconds * 100 + src.microseconds);
return dest;
@@ -119,9 +68,9 @@ udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src)
struct timestamp *
udf_time_to_disk_stamp(struct timestamp *dest, struct timespec ts)
{
- long int days, rem, y;
- const unsigned short int *ip;
+ long seconds;
int16_t offset;
+ struct tm tm;
offset = -sys_tz.tz_minuteswest;
@@ -130,35 +79,14 @@ udf_time_to_disk_stamp(struct timestamp *dest, struct timespec ts)
dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF));
- ts.tv_sec += offset * 60;
- days = ts.tv_sec / SECS_PER_DAY;
- rem = ts.tv_sec % SECS_PER_DAY;
- dest->hour = rem / SECS_PER_HOUR;
- rem %= SECS_PER_HOUR;
- dest->minute = rem / 60;
- dest->second = rem % 60;
- y = 1970;
-
-#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
-#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
-
- while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
- long int yg = y + days / 365 - (days % 365 < 0);
-
- /* Adjust DAYS and Y to match the guessed year. */
- days -= ((yg - y) * 365
- + LEAPS_THRU_END_OF(yg - 1)
- - LEAPS_THRU_END_OF(y - 1));
- y = yg;
- }
- dest->year = cpu_to_le16(y);
- ip = __mon_yday[__isleap(y)];
- for (y = 11; days < (long int)ip[y]; --y)
- continue;
- days -= ip[y];
- dest->month = y + 1;
- dest->day = days + 1;
-
+ seconds = ts.tv_sec + offset * 60;
+ time64_to_tm(seconds, 0, &tm);
+ dest->year = cpu_to_le16(tm.tm_year + 1900);
+ dest->month = tm.tm_mon + 1;
+ dest->day = tm.tm_mday;
+ dest->hour = tm.tm_hour;
+ dest->minute = tm.tm_min;
+ dest->second = tm.tm_sec;
dest->centiseconds = ts.tv_nsec / 10000000;
dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 -
dest->centiseconds * 10000) / 100;
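A small sanity sketch (not part of the patch; the function name is hypothetical) of the two helpers the hand-rolled tables were replaced with. Note the asymmetric conventions: mktime64() takes a full year and a 1-based month, while struct tm counts years from 1900 and months from 0, which is why the hunk above adds 1900 and 1 back:

#include <linux/kernel.h>
#include <linux/time.h>

static void __maybe_unused udftime_roundtrip_demo(void)
{
	struct tm tm;
	/* 2017-07-14 12:34:56 UTC; month is 1-based for mktime64() */
	time64_t t = mktime64(2017, 7, 14, 12, 34, 56);

	time64_to_tm(t, 0, &tm);
	WARN_ON(tm.tm_year + 1900 != 2017 ||
		tm.tm_mon + 1 != 7 ||
		tm.tm_mday != 14);
}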
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index d6ea520162b2..4d85992d75b2 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -54,6 +54,16 @@ kmem_flags_convert(xfs_km_flags_t flags)
lflags &= ~__GFP_FS;
}
+ /*
+ * The default page/slab allocator behavior is to retry forever
+ * for small allocations. We can override this behavior by using
+ * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
+ * as is feasible, but to fail rather than retry forever, for all
+ * request sizes.
+ */
+ if (flags & KM_MAYFAIL)
+ lflags |= __GFP_RETRY_MAYFAIL;
+
if (flags & KM_ZERO)
lflags |= __GFP_ZERO;
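A hedged sketch of what this buys an XFS caller (the surrounding function is assumed, not shown in this hunk): with KM_MAYFAIL the allocation can now fail under sustained pressure instead of looping forever, so the error path below is genuinely reachable:

	buf = kmem_zalloc(size, KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;	/* the VM eventually gives up instead of retrying */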
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index ef8a1c75a467..de7b9bd30bec 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -114,12 +114,14 @@ xfs_inode_hasattr(
* Overall external interface routines.
*========================================================================*/
-/* Retrieve an extended attribute and its value. Must have iolock. */
+/* Retrieve an extended attribute and its value. Must have ilock. */
int
xfs_attr_get_ilocked(
struct xfs_inode *ip,
struct xfs_da_args *args)
{
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+
if (!xfs_inode_hasattr(ip))
return -ENOATTR;
else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
index 2834574cb6e7..d69c772271cb 100644
--- a/fs/xfs/libxfs/xfs_quota_defs.h
+++ b/fs/xfs/libxfs/xfs_quota_defs.h
@@ -136,8 +136,6 @@ typedef uint16_t xfs_qwarncnt_t;
*/
#define XFS_QMOPT_INHERIT 0x1000000
-#define XFS_QMOPT_NOLOCK 0x2000000 /* don't ilock during dqget */
-
/*
* flags to xfs_trans_mod_dquot.
*/
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 545eca508d42..7740c8a5e736 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -463,6 +463,8 @@ xfs_attr_list_int_ilocked(
{
struct xfs_inode *dp = context->dp;
+ ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+
/*
* Decide on what work routines to call based on the inode size.
*/
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index f89f7b5241e6..fd2ef8c2c9a7 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -472,23 +472,18 @@ xfs_qm_dqtobp(
struct xfs_mount *mp = dqp->q_mount;
xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id);
struct xfs_trans *tp = (tpp ? *tpp : NULL);
- uint lock_mode = 0;
+ uint lock_mode;
quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
- ASSERT(!(flags & XFS_QMOPT_NOLOCK) ||
- xfs_isilocked(quotip, XFS_ILOCK_SHARED) ||
- xfs_isilocked(quotip, XFS_ILOCK_EXCL));
- if (!(flags & XFS_QMOPT_NOLOCK))
- lock_mode = xfs_ilock_data_map_shared(quotip);
+ lock_mode = xfs_ilock_data_map_shared(quotip);
if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
/*
* Return if this type of quotas is turned off while we
* didn't have the quota inode lock.
*/
- if (lock_mode)
- xfs_iunlock(quotip, lock_mode);
+ xfs_iunlock(quotip, lock_mode);
return -ESRCH;
}
@@ -498,8 +493,7 @@ xfs_qm_dqtobp(
error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
- if (lock_mode)
- xfs_iunlock(quotip, lock_mode);
+ xfs_iunlock(quotip, lock_mode);
if (error)
return error;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 12cd9cf7de41..23a50d7aa46a 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -61,6 +61,8 @@ xfs_readlink_bmap_ilocked(
int fsblocks = 0;
int offset;
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+
fsblocks = xfs_symlink_blocks(mp, pathlen);
error = xfs_bmapi_read(ip, 0, fsblocks, mval, &nmaps, 0);
if (error)
diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h
deleted file mode 100644
index 67deb898f0c5..000000000000
--- a/include/asm-generic/uaccess-unaligned.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H
-#define __ASM_GENERIC_UACCESS_UNALIGNED_H
-
-/*
- * This macro should be used instead of __get_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __get_user_unaligned(x, ptr) \
-({ \
- __typeof__ (*(ptr)) __x; \
- __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
- (x) = __x; \
-})
-
-
-/*
- * This macro should be used instead of __put_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __put_user_unaligned(x, ptr) \
-({ \
- __typeof__ (*(ptr)) __x = (x); \
- __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
-})
-
-#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 4c8d4c81a0e8..182f83283e24 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -22,56 +22,56 @@ struct dw_hdmi;
* 48bit bus.
*
* +----------------------+----------------------------------+------------------------------+
- * + Format Name + Format Code + Encodings +
+ * | Format Name | Format Code | Encodings |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 8bit + ``MEDIA_BUS_FMT_RGB888_1X24`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 8bit | ``MEDIA_BUS_FMT_RGB888_1X24`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 10bits + ``MEDIA_BUS_FMT_RGB101010_1X30`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 10bits | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 12bits + ``MEDIA_BUS_FMT_RGB121212_1X36`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 12bits | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 16bits + ``MEDIA_BUS_FMT_RGB161616_1X48`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 16bits | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 8bit + ``MEDIA_BUS_FMT_YUV8_1X24`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 8bit | ``MEDIA_BUS_FMT_YUV8_1X24`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 10bits + ``MEDIA_BUS_FMT_YUV10_1X30`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 10bits | ``MEDIA_BUS_FMT_YUV10_1X30`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 12bits + ``MEDIA_BUS_FMT_YUV12_1X36`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 12bits | ``MEDIA_BUS_FMT_YUV12_1X36`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 16bits + ``MEDIA_BUS_FMT_YUV16_1X48`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 16bits | ``MEDIA_BUS_FMT_YUV16_1X48`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 8bit + ``MEDIA_BUS_FMT_UYVY8_1X16`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:2 8bit | ``MEDIA_BUS_FMT_UYVY8_1X16`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 10bits + ``MEDIA_BUS_FMT_UYVY10_1X20`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:2 10bits | ``MEDIA_BUS_FMT_UYVY10_1X20`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 12bits + ``MEDIA_BUS_FMT_UYVY12_1X24`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:2 12bits | ``MEDIA_BUS_FMT_UYVY12_1X24`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 8bit + ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 8bit | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 10bits + ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``+ ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 10bits | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 12bits + ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``+ ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 12bits | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 16bits + ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``+ ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 16bits | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
*/
diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h
new file mode 100644
index 000000000000..a6f009821137
--- /dev/null
+++ b/include/dt-bindings/clock/boston-clock.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
+#define __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
+
+#define BOSTON_CLK_INPUT 0
+#define BOSTON_CLK_SYS 1
+#define BOSTON_CLK_CPU 2
+
+#endif /* __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 334165c911f0..854e1bdd0b2a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -69,34 +69,14 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void __inc_wb_stat(struct bdi_writeback *wb,
- enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, 1);
-}
-
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- unsigned long flags;
-
- local_irq_save(flags);
- __inc_wb_stat(wb, item);
- local_irq_restore(flags);
-}
-
-static inline void __dec_wb_stat(struct bdi_writeback *wb,
- enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, -1);
+ __add_wb_stat(wb, item, 1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- unsigned long flags;
-
- local_irq_save(flags);
- __dec_wb_stat(wb, item);
- local_irq_restore(flags);
+ __add_wb_stat(wb, item, -1);
}
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 05488da3aee9..3ae9013eeaaa 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -46,7 +46,7 @@ struct linux_binprm {
unsigned interp_flags;
unsigned interp_data;
unsigned long loader, exec;
-};
+} __randomize_layout;
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
@@ -81,7 +81,7 @@ struct linux_binfmt {
int (*load_shlib)(struct file *);
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
-};
+} __randomize_layout;
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 360c082e885c..d41d40ac3efd 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -85,7 +85,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
- if (sk_fullsock(__sk)) \
+ if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 621076f56251..8e5d31f6faef 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -43,6 +43,7 @@ struct bpf_reg_state {
u32 min_align;
u32 aux_off;
u32 aux_off_align;
+ bool value_from_signed;
};
enum bpf_stack_slot_type {
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index 408bc09ce497..cb28eb21e3ca 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -17,7 +17,7 @@ struct cdev {
struct list_head list;
dev_t dev;
unsigned int count;
-};
+} __randomize_layout;
void cdev_init(struct cdev *, const struct file_operations *);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index f0f6c537b64c..040dd105c3e7 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -10,14 +10,14 @@
#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- const static uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- const static uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 91bd464f4c9b..12c96d94d1fa 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -657,6 +657,28 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
+static inline int clk_bulk_prepare_enable(int num_clks,
+ struct clk_bulk_data *clks)
+{
+ int ret;
+
+ ret = clk_bulk_prepare(num_clks, clks);
+ if (ret)
+ return ret;
+ ret = clk_bulk_enable(num_clks, clks);
+ if (ret)
+ clk_bulk_unprepare(num_clks, clks);
+
+ return ret;
+}
+
+static inline void clk_bulk_disable_unprepare(int num_clks,
+ struct clk_bulk_data *clks)
+{
+ clk_bulk_disable(num_clks, clks);
+ clk_bulk_unprepare(num_clks, clks);
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
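A minimal consumer sketch for the new bulk helpers, assuming a hypothetical device with two named clocks (all identifiers here are illustrative):

#include <linux/clk.h>
#include <linux/kernel.h>

static struct clk_bulk_data my_clks[] = {
	{ .id = "bus" },
	{ .id = "core" },
};

static int my_probe(struct device *dev)
{
	int ret;

	ret = clk_bulk_get(dev, ARRAY_SIZE(my_clks), my_clks);
	if (ret)
		return ret;

	/* prepares then enables; unwinds the prepare on enable failure */
	return clk_bulk_prepare_enable(ARRAY_SIZE(my_clks), my_clks);
}

static void my_remove(struct device *dev)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(my_clks), my_clks);
	clk_bulk_put(ARRAY_SIZE(my_clks), my_clks);
}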
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cd4bbe8242bd..bdb80c4aef6e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -235,6 +235,7 @@
#endif /* GCC_VERSION >= 40500 */
#if GCC_VERSION >= 40600
+
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
@@ -242,7 +243,17 @@
* this.
*/
#define __visible __attribute__((externally_visible))
-#endif
+
+/*
+ * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but anonymous
+ * structs are only supported since GCC 4.6. To provide as much build
+ * testing coverage as possible, this is used for all GCC 4.6+ builds,
+ * and not just on RANDSTRUCT_PLUGIN builds.
+ */
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end } __randomize_layout;
+
+#endif /* GCC_VERSION >= 40600 */
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
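A sketch of how the two markers are meant to be consumed (field names are illustrative): only the bracketed region is eligible for shuffling, and on compilers without anonymous-struct support the markers expand to nothing via the fallback added to compiler.h below:

struct example {
	int not_shuffled;		/* stays first */
	randomized_struct_fields_start
	void *cred;			/* these fields may be reordered */
	void *files;
	randomized_struct_fields_end
	int also_not_shuffled;		/* stays last */
};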
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 219f82f3ec1a..eca8ad75e28b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -452,6 +452,11 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __no_randomize_layout
#endif
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c156f5082758..d4292ebc5c8b 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -28,47 +28,49 @@
#include <linux/thermal.h>
#include <linux/cpumask.h>
+struct cpufreq_policy;
+
typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
unsigned long voltage, u32 *power);
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy.
*/
struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus);
+cpufreq_cooling_register(struct cpufreq_policy *policy);
struct thermal_cooling_device *
-cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func);
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy.
*/
#ifdef CONFIG_THERMAL_OF
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus);
+ struct cpufreq_policy *policy);
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
@@ -82,15 +84,14 @@ of_cpufreq_power_cooling_register(struct device_node *np,
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
-unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
#else /* !CONFIG_CPU_THERMAL */
static inline struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus)
+cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func)
{
return NULL;
@@ -98,14 +99,14 @@ cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
@@ -117,11 +118,6 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
return;
}
-static inline
-unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
-{
- return THERMAL_CSTATE_INVALID;
-}
#endif /* CONFIG_CPU_THERMAL */
#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 905117bd5012..f10a9b3761cd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -862,6 +862,20 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
return -EINVAL;
}
}
+
+static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ int count = 0;
+
+ if (unlikely(!policy->freq_table))
+ return 0;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table)
+ count++;
+
+ return count;
+}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
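A sketch of the intended caller pattern for cpufreq_table_count_valid_entries() (the cooling-device context and error style are assumed): size an array from the count of valid entries before walking the table a second time:

	num = cpufreq_table_count_valid_entries(policy);
	if (!num)
		return ERR_PTR(-EINVAL);

	freq_table = kcalloc(num, sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return ERR_PTR(-ENOMEM);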
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 4090a42578a8..2df2118fbe13 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -19,7 +19,7 @@
CRASH_CORE_NOTE_NAME_BYTES + \
CRASH_CORE_NOTE_DESC_BYTES)
-#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_BYTES PAGE_SIZE
#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
@@ -28,6 +28,7 @@
typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+void crash_update_vmcoreinfo_safecopy(void *ptr);
void crash_save_vmcoreinfo(void);
void arch_crash_save_vmcoreinfo(void);
__printf(1, 2)
@@ -56,9 +57,7 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-extern size_t vmcoreinfo_size;
-extern size_t vmcoreinfo_max_size;
+extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
diff --git a/include/linux/cred.h b/include/linux/cred.h
index c728d515e5e2..099058e1178b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -31,7 +31,7 @@ struct group_info {
atomic_t usage;
int ngroups;
kgid_t gid[0];
-};
+} __randomize_layout;
/**
* get_group_info - Get a reference to a group info structure
@@ -145,7 +145,7 @@ struct cred {
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
-};
+} __randomize_layout;
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 025727bf6797..aae1cdb76851 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -55,6 +55,11 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+extern const char empty_string[];
+extern const struct qstr empty_name;
+extern const char slash_string[];
+extern const struct qstr slash_name;
+
struct dentry_stat_t {
long nr_dentry;
long nr_unused;
@@ -113,7 +118,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-};
+} __randomize_layout;
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -592,8 +597,8 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
}
struct name_snapshot {
- const char *name;
- char inline_name[DNAME_INLINE_LEN];
+ const unsigned char *name;
+ unsigned char inline_name[DNAME_INLINE_LEN];
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 6daf6d4971f6..2f14ac73d01d 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -14,6 +14,7 @@
#define _LINUX_EVENTPOLL_H
#include <uapi/linux/eventpoll.h>
+#include <uapi/linux/kcmp.h>
/* Forward declarations to avoid compiler errors */
@@ -22,6 +23,10 @@ struct file;
#ifdef CONFIG_EPOLL
+#ifdef CONFIG_CHECKPOINT_RESTORE
+struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
+#endif
+
/* Used to initialize the epoll bits inside the "struct file" */
static inline void eventpoll_init_file(struct file *file)
{
diff --git a/include/linux/flat.h b/include/linux/flat.h
index 2c1eb15c4ba4..7d542dfd0def 100644
--- a/include/linux/flat.h
+++ b/include/linux/flat.h
@@ -9,8 +9,8 @@
#ifndef _LINUX_FLAT_H
#define _LINUX_FLAT_H
-#include <asm/flat.h>
#include <uapi/linux/flat.h>
+#include <asm/flat.h>
/*
* While it would be nice to keep this header clean, users of older
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 976aaa1af82a..6e1fd5d21248 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -296,7 +296,7 @@ struct kiocb {
void *private;
int ki_flags;
enum rw_hint ki_hint;
-};
+} __randomize_layout;
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
@@ -404,7 +404,7 @@ struct address_space {
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
errseq_t wb_err;
-} __attribute__((aligned(sizeof(long))));
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
@@ -447,7 +447,7 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
-};
+} __randomize_layout;
/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
@@ -666,7 +666,7 @@ struct inode {
#endif
void *i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
static inline unsigned int i_blocksize(const struct inode *node)
{
@@ -883,7 +883,8 @@ struct file {
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
errseq_t f_wb_err;
-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+} __randomize_layout
+ __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
@@ -1020,7 +1021,7 @@ struct file_lock {
int state; /* state of grant or error if -ve */
} afs;
} fl_u;
-};
+} __randomize_layout;
struct file_lock_context {
spinlock_t flc_lock;
@@ -1364,11 +1365,6 @@ struct super_block {
*/
char *s_subtype;
- /*
- * Saved mount options for lazy filesystems using
- * generic_show_options()
- */
- char __rcu *s_options;
const struct dentry_operations *s_d_op; /* default d_op for dentries */
/*
@@ -1417,7 +1413,7 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
-};
+} __randomize_layout;
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
@@ -1703,7 +1699,7 @@ struct file_operations {
u64);
ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
u64);
-};
+} __randomize_layout;
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
@@ -3046,7 +3042,7 @@ extern int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start,
u64 len, get_block_t *get_block);
-extern void get_filesystem(struct file_system_type *fs);
+extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
@@ -3123,10 +3119,6 @@ extern void setattr_copy(struct inode *inode, const struct iattr *attr);
extern int file_update_time(struct file *file);
-extern int generic_show_options(struct seq_file *m, struct dentry *root);
-extern void save_mount_options(struct super_block *sb, char *options);
-extern void replace_mount_options(struct super_block *sb, char *options);
-
static inline bool io_is_direct(struct file *filp)
{
return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 0efc3e62843a..7a026240cbb1 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -12,7 +12,7 @@ struct fs_struct {
int umask;
int in_exec;
struct path root, pwd;
-};
+} __randomize_layout;
extern struct kmem_cache *fs_cachep;
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 9ab375419189..50893a1646cf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -99,6 +99,10 @@ struct fwnode_operations {
(fwnode ? (fwnode_has_op(fwnode, op) ? \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
-EINVAL)
+#define fwnode_call_bool_op(fwnode, op, ...) \
+ (fwnode ? (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \
+ false)
#define fwnode_call_ptr_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4c6656f1fee7..bcfb9f7c46f5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -25,7 +25,7 @@ struct vm_area_struct;
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
-#define ___GFP_REPEAT 0x400u
+#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
@@ -136,26 +136,56 @@ struct vm_area_struct;
*
* __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
- * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail. This depends upon the particular VM implementation.
+ * The default allocator behavior depends on the request size. We have a
+ * concept of so-called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations (i.e. non-costly ones) are too essential to fail, so they
+ * are implicitly non-failing by default (with some exceptions: e.g. OOM victims
+ * might fail, so the caller still has to check for failures), while costly
+ * requests try not to be disruptive and back off even without invoking the
+ * OOM killer.
+ * The following three modifiers can be used to override some of these
+ * implicit rules:
+ *
+ * __GFP_NORETRY: The VM implementation will try only very lightweight
+ * direct memory reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like the OOM killer.
+ * The caller must handle the failure, which is quite likely to happen
+ * under heavy memory pressure. The flag is suitable when failure can
+ * easily be handled at small cost, such as reduced throughput.
+ *
+ * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with __GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. New users should be evaluated carefully
- * (and the flag should be used only when there is no reasonable failure
- * policy) but it is definitely preferable to use the flag rather than
- * opencode endless loop around allocator.
- *
- * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
- * return NULL when direct reclaim and memory compaction have failed to allow
- * the allocation to succeed. The OOM killer is not called with the current
- * implementation.
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy), but it is
+ * definitely preferable to use the flag rather than to open-code an
+ * endless loop around the allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
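A hedged example of the middle ground the new flag occupies: a costly buffer that should rather fail than push the system toward the OOM killer, with a vmalloc fallback (a common pattern; the variable names are assumed):

	buf = kmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!buf)
		buf = vmalloc(size);	/* fall back rather than disrupt the system */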
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8d9fe131a240..0ed8e41aaf11 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -268,6 +268,9 @@ struct hugetlbfs_sb_info {
spinlock_t stat_lock;
struct hstate *hstate;
struct hugepage_subpool *spool;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
};
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 71fd92d81b26..fadd579d577d 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -20,6 +20,9 @@ struct kern_ipc_perm {
umode_t mode;
unsigned long seq;
void *security;
-} ____cacheline_aligned_in_smp;
+
+ struct rcu_head rcu;
+ atomic_t refcount;
+} ____cacheline_aligned_in_smp __randomize_layout;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 848e5796400e..65327ee0936b 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -61,7 +61,7 @@ struct ipc_namespace {
struct ucounts *ucounts;
struct ns_common ns;
-};
+} __randomize_layout;
extern struct ipc_namespace init_ipc_ns;
extern spinlock_t mq_lock;
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 348c6f47e4cc..8037850f3104 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -85,19 +85,18 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
k += 12;
}
/* Last block: affect all 32 bits of (c) */
- /* All the case statements fall through */
switch (length) {
- case 12: c += (u32)k[11]<<24;
- case 11: c += (u32)k[10]<<16;
- case 10: c += (u32)k[9]<<8;
- case 9: c += k[8];
- case 8: b += (u32)k[7]<<24;
- case 7: b += (u32)k[6]<<16;
- case 6: b += (u32)k[5]<<8;
- case 5: b += k[4];
- case 4: a += (u32)k[3]<<24;
- case 3: a += (u32)k[2]<<16;
- case 2: a += (u32)k[1]<<8;
+ case 12: c += (u32)k[11]<<24; /* fall through */
+ case 11: c += (u32)k[10]<<16; /* fall through */
+ case 10: c += (u32)k[9]<<8; /* fall through */
+ case 9: c += k[8]; /* fall through */
+ case 8: b += (u32)k[7]<<24; /* fall through */
+ case 7: b += (u32)k[6]<<16; /* fall through */
+ case 6: b += (u32)k[5]<<8; /* fall through */
+ case 5: b += k[4]; /* fall through */
+ case 4: a += (u32)k[3]<<24; /* fall through */
+ case 3: a += (u32)k[2]<<16; /* fall through */
+ case 2: a += (u32)k[1]<<8; /* fall through */
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
@@ -131,10 +130,10 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
k += 3;
}
- /* Handle the last 3 u32's: all the case statements fall through */
+ /* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2];
- case 2: b += k[1];
+ case 3: c += k[2]; /* fall through */
+ case 2: b += k[1]; /* fall through */
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1c91f26e2996..bd6d96cf80b1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -11,6 +11,7 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
+#include <linux/build_bug.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@@ -854,9 +855,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @member: the name of the member within the struct.
*
*/
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
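An illustration of what the strengthened container_of() catches (the types here are hypothetical):

#include <linux/list.h>

struct foo {
	int x;
	struct list_head node;
};

static struct foo *foo_from_node(struct list_head *lh)
{
	/* fine: typeof(*lh) matches the type of foo::node */
	return container_of(lh, struct foo, node);
}

/*
 * By contrast, container_of(&f->x, struct foo, node) now fails at
 * compile time with "pointer type mismatch in container_of()" instead
 * of silently computing a bogus struct address.
 */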
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 65888418fb69..dd056fab9e35 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -172,6 +172,7 @@ struct kimage {
unsigned long start;
struct page *control_code_page;
struct page *swap_page;
+ void *vmcoreinfo_data_copy; /* resides in the crash memory */
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
@@ -241,6 +242,7 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
+extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 8496cf64575c..9520fc3c3b9a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,7 +45,7 @@ struct key_preparsed_payload {
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
time_t expiry; /* Expiry time of key */
-};
+} __randomize_layout;
typedef int (*request_key_actor_t)(struct key_construction *key,
const char *op, void *aux);
@@ -158,7 +158,7 @@ struct key_type {
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
-};
+} __randomize_layout;
extern struct key_type key_type_keyring;
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index c4e441e00db5..655082c88fd9 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -64,7 +64,7 @@ struct subprocess_info {
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
-};
+} __randomize_layout;
extern int
call_usermodehelper(const char *path, char **argv, char **envp, int wait);
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index eeab34b0f589..4d800c79475a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -172,7 +172,7 @@ struct kset {
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
extern void kset_init(struct kset *kset);
extern int __must_check kset_register(struct kset *kset);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0b50e7b35ed4..648b34cabb38 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -234,7 +234,7 @@ struct kvm_vcpu {
int guest_fpu_loaded, guest_xcr0_loaded;
struct swait_queue_head wq;
- struct pid *pid;
+ struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
@@ -390,7 +390,7 @@ struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
+ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
@@ -404,7 +404,7 @@ struct kvm {
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
- struct kvm_io_bus *buses[KVM_NR_BUSES];
+ struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
struct {
spinlock_t lock;
@@ -473,6 +473,12 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
+{
+ return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
+ lockdep_is_held(&kvm->slots_lock));
+}
+
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
@@ -562,9 +568,8 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- return rcu_dereference_check(kvm->memslots[as_id],
- srcu_read_lock_held(&kvm->srcu)
- || lockdep_is_held(&kvm->slots_lock));
+ return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
+ lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
diff --git a/include/linux/llist.h b/include/linux/llist.h
index d11738110a7a..1957635e6d5f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -93,6 +93,23 @@ static inline void init_llist_head(struct llist_head *list)
container_of(ptr, type, member)
/**
+ * member_address_is_nonnull - check whether the member address is not NULL
+ * @ptr: the object pointer (struct type * that contains the llist_node)
+ * @member: the name of the llist_node within the struct.
+ *
+ * This macro is conceptually the same as
+ * &ptr->member != NULL
+ * but it works around the fact that compilers may assume that the address
+ * of a member is never a NULL pointer.
+ *
+ * Real objects that start at a high address and have a member at NULL are
+ * unlikely to exist, but such pointers may be returned e.g. by the
+ * container_of() macro.
+ */
+#define member_address_is_nonnull(ptr, member) \
+ ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
+
+/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
@@ -145,7 +162,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
- &(pos)->member != NULL; \
+ member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
@@ -167,7 +184,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
- &pos->member != NULL && \
+ member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
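Why the open-coded termination test was fragile (the layout is illustrative, for a 64-bit build):

struct item {
	long payload;			/* so offsetof(struct item, node) == 8 */
	struct llist_node node;
};

/*
 * llist_entry(NULL, struct item, node) evaluates to
 * (struct item *)(0 - 8), and &pos->node for that pointer is literally
 * NULL. The old test "&pos->node != NULL" invites the compiler to
 * assume a member address can never be NULL and drop the check;
 * "(uintptr_t)pos + 8 != 0" is plain arithmetic it must keep.
 */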
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 41f7b6a04d69..3eca67728366 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -192,9 +192,9 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[];
#ifdef CONFIG_LOCKD_V4
-extern struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[];
#endif
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index d39ed1cc5fbf..7acbecc21a40 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -95,19 +95,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *, void *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *, void *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
+int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
/*
int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index e58c88b52ce1..bf1645609225 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -23,19 +23,19 @@
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *, void *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *, void *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
+int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
/*
int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7a86925ba8f3..3a90febadbe2 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1912,7 +1912,7 @@ struct security_hook_heads {
struct list_head audit_rule_match;
struct list_head audit_rule_free;
#endif /* CONFIG_AUDIT */
-};
+} __randomize_layout;
/*
* Security module hook list structure.
@@ -1923,7 +1923,7 @@ struct security_hook_list {
struct list_head *head;
union security_list_options hook;
char *lsm;
-};
+} __randomize_layout;
/*
* Initializing a security_hook_list structure takes
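
For context: a minimal sketch of what the __randomize_layout annotations
added above imply for code that defines such structures. With
CONFIG_GCC_PLUGIN_RANDSTRUCT enabled the plugin shuffles member order at
build time, so only designated initializers remain safe; the demo_ops
structure below is hypothetical, not taken from this patch.

    #include <linux/compiler.h>

    struct demo_ops {
            int (*open)(void *priv);
            void (*close)(void *priv);
            void *priv;
    } __randomize_layout;

    /* Safe: designated initializers do not depend on member order. */
    static struct demo_ops default_demo_ops = {
            .open  = NULL,
            .close = NULL,
            .priv  = NULL,
    };
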
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4634da521238..3e0d405dc842 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -34,7 +34,7 @@ extern char *migrate_reason_names[MR_TYPES];
static inline struct page *new_page_nodemask(struct page *page,
int preferred_nid, nodemask_t *nodemask)
{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
if (PageHuge(page))
return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
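
A short sketch of what the added __GFP_RETRY_MAYFAIL flag buys the caller:
the allocator retries hard but returns NULL rather than invoking the OOM
killer, so the call site must tolerate failure. The helper below is a
hypothetical illustration.

    #include <linux/gfp.h>

    /* Hypothetical helper: allocate one movable page, tolerating failure. */
    static struct page *demo_alloc_movable(int nid)
    {
            gfp_t gfp = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;

            /* May return NULL under memory pressure instead of OOM-killing. */
            return __alloc_pages_node(nid, gfp, 0);
    }
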
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d5bed0875d30..aad5d81dfb44 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1068,7 +1068,7 @@ static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
}
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
@@ -1105,10 +1105,9 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
- gfp_t gfp);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1124,8 +1123,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
- gfp_t gfp);
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
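
A sketch of how mlx4 call sites look once the gfp_t parameters are gone
(the demo_ wrapper and its error handling are illustrative only):

    #include <linux/mlx4/device.h>

    static int demo_alloc(struct mlx4_dev *dev, int qpn,
                          struct mlx4_db *db, struct mlx4_qp *qp)
    {
            int err;

            err = mlx4_db_alloc(dev, db, 1);    /* was: ..., 1, GFP_KERNEL */
            if (err)
                    return err;

            err = mlx4_qp_alloc(dev, qpn, qp);  /* was: ..., qp, GFP_KERNEL */
            if (err)
                    mlx4_db_free(dev, db);

            return err;
    }
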
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 45cdb27791a3..ff151814a02d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -342,7 +342,7 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-};
+} __randomize_layout;
struct core_thread {
struct task_struct *task;
@@ -500,7 +500,7 @@ struct mm_struct {
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
-};
+} __randomize_layout;
extern struct mm_struct init_mm;
diff --git a/include/linux/module.h b/include/linux/module.h
index 8eb9a1e693e5..e7bdd549e527 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -45,7 +45,7 @@ struct module_kobject {
struct kobject *drivers_dir;
struct module_param_attrs *mp;
struct completion *kobj_completion;
-};
+} __randomize_layout;
struct module_attribute {
struct attribute attr;
@@ -475,7 +475,7 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
-} ____cacheline_aligned;
+} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 8e0352af06b7..1ce85e6fd95f 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -67,7 +67,7 @@ struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
-};
+} __randomize_layout;
struct file; /* forward dec */
struct path;
diff --git a/include/linux/msg.h b/include/linux/msg.h
index f3f302f9c197..a001305f5a79 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -29,7 +29,7 @@ struct msg_queue {
struct list_head q_messages;
struct list_head q_receivers;
struct list_head q_senders;
-};
+} __randomize_layout;
/* Helper routines for sys_msgsnd and sys_msgrcv */
extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index de0d889e4fe1..892148c448cc 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -107,6 +107,8 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#define NAND_STATUS_READY 0x40
#define NAND_STATUS_WP 0x80
+#define NAND_DATA_IFACE_CHECK_ONLY -1
+
/*
* Constants for ECC_MODES
*/
@@ -116,6 +118,7 @@ typedef enum {
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
NAND_ECC_HW_OOB_FIRST,
+ NAND_ECC_ON_DIE,
} nand_ecc_modes_t;
enum nand_ecc_algo {
@@ -257,6 +260,8 @@ struct nand_chip;
/* Vendor-specific feature address (Micron) */
#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
/* ONFI subfeature parameters length */
#define ONFI_SUBFEATURE_PARAM_LEN 4
@@ -477,6 +482,44 @@ static inline void nand_hw_control_init(struct nand_hw_control *nfc)
}
/**
+ * struct nand_ecc_step_info - ECC step information of ECC engine
+ * @stepsize: data bytes per ECC step
+ * @strengths: array of supported strengths
+ * @nstrengths: number of supported strengths
+ */
+struct nand_ecc_step_info {
+ int stepsize;
+ const int *strengths;
+ int nstrengths;
+};
+
+/**
+ * struct nand_ecc_caps - capability of ECC engine
+ * @stepinfos: array of ECC step information
+ * @nstepinfos: number of ECC step information entries
+ * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
+ */
+struct nand_ecc_caps {
+ const struct nand_ecc_step_info *stepinfos;
+ int nstepinfos;
+ int (*calc_ecc_bytes)(int step_size, int strength);
+};
+
+/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */
+#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) \
+static const int __name##_strengths[] = { __VA_ARGS__ }; \
+static const struct nand_ecc_step_info __name##_stepinfo = { \
+ .stepsize = __step, \
+ .strengths = __name##_strengths, \
+ .nstrengths = ARRAY_SIZE(__name##_strengths), \
+}; \
+static const struct nand_ecc_caps __name = { \
+ .stepinfos = &__name##_stepinfo, \
+ .nstepinfos = 1, \
+ .calc_ecc_bytes = __calc, \
+}
+
+/**
* struct nand_ecc_ctrl - Control structure for ECC
* @mode: ECC mode
* @algo: ECC algorithm
@@ -815,7 +858,10 @@ struct nand_manufacturer_ops {
* @read_retries: [INTERN] the number of read retry modes supported
* @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
* @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing
+ * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
+ * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ * means the configuration should not be applied but
+ * only checked.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -826,9 +872,6 @@ struct nand_manufacturer_ops {
* structure which is shared among multiple independent
* devices.
* @priv: [OPTIONAL] pointer to private chip data
- * @errstat: [OPTIONAL] hardware specific function to perform
- * additional error status checks (determine if errors are
- * correctable).
* @manufacturer: [INTERN] Contains manufacturer information
*/
@@ -852,16 +895,13 @@ struct nand_chip {
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
- int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
- int status, int page);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
- int (*setup_data_interface)(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only);
+ int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *conf);
int chip_delay;
@@ -1244,6 +1284,15 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
void *extraoob, int extraooblen,
int threshold);
+int nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
+int nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
+int nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
/* Default write_oob implementation */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
@@ -1258,6 +1307,19 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
+/* Stub used by drivers that do not support GET/SET FEATURES operations */
+int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
+ struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
+
+/* Default read_page_raw implementation */
+int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page);
+
+/* Default write_page_raw implementation */
+int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page);
+
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
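
To illustrate the new ECC capabilities interface above, a sketch of a
driver declaring a single-stepsize capability table with
NAND_ECC_CAPS_SINGLE() and asking the core to match the chip's ECC
requirement. The BCH byte calculation and the demo_ names are assumptions
for illustration, not taken from this patch.

    #include <linux/kernel.h>
    #include <linux/mtd/nand.h>

    /* Assumed BCH-13 engine: 13 bits per bit of correction, rounded up. */
    static int demo_calc_ecc_bytes(int step_size, int strength)
    {
            return DIV_ROUND_UP(13 * strength, 8);
    }

    /* One 512-byte step size; strengths of 1, 2, 4 or 8 bits per step. */
    NAND_ECC_CAPS_SINGLE(demo_ecc_caps, demo_calc_ecc_bytes, 512, 1, 2, 4, 8);

    static int demo_setup_ecc(struct nand_chip *chip, int oobavail)
    {
            /* Pick a setting matching the chip's reported requirement. */
            return nand_match_ecc_req(chip, &demo_ecc_caps, oobavail);
    }
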
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 06df1e06b6e0..c4beb70dacbd 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -20,6 +20,12 @@
*
* For each partition, these fields are available:
* name: string that will be used to label the partition's MTD device.
+ * types: some partitions can be containers that use a specific format to
+ * describe embedded subpartitions / volumes. E.g. many home routers use a
+ * "firmware" partition that contains at least a kernel and a rootfs. In
+ * such a case an extra parser is needed to detect these dynamic partitions
+ * and report them to the MTD subsystem. If set, this property stores an
+ * array of parser names to use when looking for subpartitions.
* size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
* will extend to the end of the master MTD device.
* offset: absolute starting position within the master MTD device; if
@@ -38,6 +44,7 @@
struct mtd_partition {
const char *name; /* identifier string */
+ const char *const *types; /* names of parsers to use if any */
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
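
A sketch of how a board file might use the new types field to mark a
container partition; the "trx" parser name and the NULL termination of the
array are assumptions for illustration:

    #include <linux/mtd/partitions.h>

    static const char * const demo_fw_types[] = { "trx", NULL };

    static const struct mtd_partition demo_parts[] = {
            {
                    .name   = "firmware",
                    .offset = 0x40000,
                    .size   = MTDPART_SIZ_FULL,
                    .types  = demo_fw_types, /* parsers to try for subpartitions */
            },
    };
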
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index f2a718030476..55faa2f07cca 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -73,6 +73,15 @@
#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
+/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */
+#define SPINOR_OP_READ_1_1_1_DTR 0x0d
+#define SPINOR_OP_READ_1_2_2_DTR 0xbd
+#define SPINOR_OP_READ_1_4_4_DTR 0xed
+
+#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e
+#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe
+#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee
+
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
#define SPINOR_OP_WRDI 0x04 /* Write disable */
@@ -119,13 +128,81 @@
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
-enum read_mode {
- SPI_NOR_NORMAL = 0,
- SPI_NOR_FAST,
- SPI_NOR_DUAL,
- SPI_NOR_QUAD,
+/* Supported SPI protocols */
+#define SNOR_PROTO_INST_MASK GENMASK(23, 16)
+#define SNOR_PROTO_INST_SHIFT 16
+#define SNOR_PROTO_INST(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \
+ SNOR_PROTO_INST_MASK)
+
+#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8)
+#define SNOR_PROTO_ADDR_SHIFT 8
+#define SNOR_PROTO_ADDR(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \
+ SNOR_PROTO_ADDR_MASK)
+
+#define SNOR_PROTO_DATA_MASK GENMASK(7, 0)
+#define SNOR_PROTO_DATA_SHIFT 0
+#define SNOR_PROTO_DATA(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \
+ SNOR_PROTO_DATA_MASK)
+
+#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */
+
+#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \
+ (SNOR_PROTO_INST(_inst_nbits) | \
+ SNOR_PROTO_ADDR(_addr_nbits) | \
+ SNOR_PROTO_DATA(_data_nbits))
+#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \
+ (SNOR_PROTO_IS_DTR | \
+ SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits))
+
+enum spi_nor_protocol {
+ SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1),
+ SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2),
+ SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4),
+ SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8),
+ SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2),
+ SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4),
+ SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8),
+ SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2),
+ SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4),
+ SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8),
+
+ SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1),
+ SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
+ SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
+ SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
};
+static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
+{
+ return !!(proto & SNOR_PROTO_IS_DTR);
+}
+
+static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >>
+ SNOR_PROTO_INST_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >>
+ SNOR_PROTO_ADDR_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >>
+ SNOR_PROTO_DATA_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
+{
+ return spi_nor_get_protocol_data_nbits(proto);
+}
+
#define SPI_NOR_MAX_CMD_SIZE 8
enum spi_nor_ops {
SPI_NOR_OPS_READ = 0,
@@ -154,9 +231,11 @@ enum spi_nor_option_flags {
* @read_opcode: the read opcode
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
- * @flash_read: the mode of the read
* @sst_write_second: used by the SST write operation
* @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @read_proto: the SPI protocol for read operations
+ * @write_proto: the SPI protocol for write operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @cmd_buf: used by the write_reg
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
@@ -185,7 +264,9 @@ struct spi_nor {
u8 read_opcode;
u8 read_dummy;
u8 program_opcode;
- enum read_mode flash_read;
+ enum spi_nor_protocol read_proto;
+ enum spi_nor_protocol write_proto;
+ enum spi_nor_protocol reg_proto;
bool sst_write_second;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
@@ -220,10 +301,71 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
}
/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask: the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+ u32 mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher the bit position, the higher the
+ * priority. For performance it is best to use Octo SPI protocols first,
+ * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ BIT(0)
+#define SNOR_HWCAPS_READ_FAST BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2 BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2 BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2 BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4 BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4 BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
+
+#define SNOR_HWCAPS_READ_OCTO GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
+#define SNOR_HWCAPS_PP BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+
+/**
* spi_nor_scan() - scan the SPI NOR
* @nor: the spi_nor structure
* @name: the chip type name
- * @mode: the read mode supported by the driver
+ * @hwcaps: the hardware capabilities supported by the controller driver
*
* The drivers can use this function to scan the SPI NOR.
* In the scanning, it will try to get all the necessary information to
@@ -233,6 +375,7 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
*
* Return: 0 for success, others for failure.
*/
-int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps);
#endif
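
Putting the new pieces together: a sketch of a controller driver
describing what its bus can do and probing with the new spi_nor_scan()
signature. The capability mix, the NULL chip name and the demo_ naming are
illustrative assumptions.

    #include <linux/mtd/spi-nor.h>
    #include <linux/printk.h>

    static int demo_probe(struct spi_nor *nor)
    {
            const struct spi_nor_hwcaps hwcaps = {
                    .mask = SNOR_HWCAPS_READ |
                            SNOR_HWCAPS_READ_FAST |
                            SNOR_HWCAPS_READ_1_1_4 |
                            SNOR_HWCAPS_PP,
            };
            int ret;

            ret = spi_nor_scan(nor, NULL, &hwcaps);
            if (ret)
                    return ret;

            /* The core records the negotiated protocols in struct spi_nor. */
            pr_info("read path uses %u data lines\n",
                    spi_nor_get_protocol_data_nbits(nor->read_proto));
            return 0;
    }
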
diff --git a/include/linux/net.h b/include/linux/net.h
index abcfa46a2bd9..dda2cc939a53 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -274,6 +274,8 @@ do { \
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
+#define net_get_random_once_wait(buf, nbytes) \
+ get_random_once_wait((buf), (nbytes))
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
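
A sketch of the intended use of net_get_random_once_wait(): initialize a
secret exactly once, from a context that may sleep until the RNG is ready.
The names are hypothetical, and the blocking behaviour is inferred from the
underlying get_random_once_wait() helper.

    #include <linux/net.h>
    #include <linux/types.h>

    static u32 demo_secret;

    static u32 demo_get_secret(void)
    {
            /* First caller may block until the random pool is seeded. */
            net_get_random_once_wait(&demo_secret, sizeof(demo_secret));
            return demo_secret;
    }
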
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index a4b97be30b28..22f081065d49 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -61,8 +61,6 @@ typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
-
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
@@ -160,13 +158,6 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n);
-int nf_register_hook(struct nf_hook_ops *reg);
-void nf_unregister_hook(struct nf_hook_ops *reg);
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
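
With the global nf_register_hook() family removed, hooks are registered
per network namespace. A minimal sketch; the demo_ hook and the pernet
wiring are illustrative:

    #include <linux/kernel.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>
    #include <net/net_namespace.h>

    static unsigned int demo_hook(void *priv, struct sk_buff *skb,
                                  const struct nf_hook_state *state)
    {
            return NF_ACCEPT;
    }

    static const struct nf_hook_ops demo_ops[] = {
            {
                    .hook     = demo_hook,
                    .pf       = NFPROTO_IPV4,
                    .hooknum  = NF_INET_PRE_ROUTING,
                    .priority = NF_IP_PRI_FIRST,
            },
    };

    /* Typically called from register_pernet_subsys() callbacks. */
    static int __net_init demo_net_init(struct net *net)
    {
            return nf_register_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
    }

    static void __net_exit demo_net_exit(struct net *net)
    {
            nf_unregister_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
    }
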
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 1b1ca04820a3..47239c336688 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -479,6 +479,7 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
+ NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index bb0eb2c9acca..e52cc55ac300 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -332,6 +332,7 @@ extern void nfs_zap_caches(struct inode *);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
struct nfs_fattr *, struct nfs4_label *);
+struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e418a1096662..74c44665e6d3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -42,6 +42,7 @@ struct nfs_client {
#define NFS_CS_MIGRATION 2 /* - transparent state migr */
#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
+#define NFS_CS_TSM_POSSIBLE 5 /* - Maybe state migration */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -210,6 +211,7 @@ struct nfs_server {
unsigned long mig_status;
#define NFS_MIG_IN_TRANSITION (1)
#define NFS_MIG_FAILED (2)
+#define NFS_MIG_TSM_POSSIBLE (3)
void (*destroy)(struct nfs_server *);
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 247cc3d3498f..d67b67ae6c8b 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -33,6 +33,8 @@ enum {
PG_UPTODATE, /* page group sync bit in read path */
PG_WB_END, /* page group sync bit in write path */
PG_REMOVE, /* page group sync bit in write path */
+ PG_CONTENDED1, /* Is someone waiting for a lock? */
+ PG_CONTENDED2, /* Is someone waiting for a lock? */
};
struct nfs_inode;
@@ -93,8 +95,8 @@ struct nfs_pageio_descriptor {
const struct rpc_call_ops *pg_rpc_callops;
const struct nfs_pgio_completion_ops *pg_completion_ops;
struct pnfs_layout_segment *pg_lseg;
+ struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq;
- void *pg_layout_private;
unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b28c83475ee8..ca3bcc4ed4e5 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -878,7 +878,7 @@ struct nfs3_readdirargs {
struct nfs_fh * fh;
__u64 cookie;
__be32 verf[2];
- int plus;
+ bool plus;
unsigned int count;
struct page ** pages;
};
@@ -909,7 +909,7 @@ struct nfs3_linkres {
struct nfs3_readdirres {
struct nfs_fattr * dir_attr;
__be32 * verf;
- int plus;
+ bool plus;
};
struct nfs3_getaclres {
@@ -1012,7 +1012,6 @@ struct nfs4_link_res {
struct nfs_fattr * dir_attr;
};
-
struct nfs4_lookup_arg {
struct nfs4_sequence_args seq_args;
const struct nfs_fh * dir_fh;
@@ -1028,6 +1027,20 @@ struct nfs4_lookup_res {
struct nfs4_label *label;
};
+struct nfs4_lookupp_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ const u32 *bitmask;
+};
+
+struct nfs4_lookupp_res {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server *server;
+ struct nfs_fattr *fattr;
+ struct nfs_fh *fh;
+ struct nfs4_label *label;
+};
+
struct nfs4_lookup_root_arg {
struct nfs4_sequence_args seq_args;
const u32 * bitmask;
@@ -1053,7 +1066,7 @@ struct nfs4_readdir_arg {
struct page ** pages; /* zero-copy data */
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
- int plus;
+ bool plus;
};
struct nfs4_readdir_res {
@@ -1422,6 +1435,7 @@ enum {
NFS_IOHDR_STAT,
};
+struct nfs_io_completion;
struct nfs_pgio_header {
struct inode *inode;
struct rpc_cred *cred;
@@ -1435,8 +1449,8 @@ struct nfs_pgio_header {
void (*release) (struct nfs_pgio_header *hdr);
const struct nfs_pgio_completion_ops *completion_ops;
const struct nfs_rw_ops *rw_ops;
+ struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
- void *layout_private;
spinlock_t lock;
/* fields protected by lock */
int pnfs_error;
@@ -1533,6 +1547,7 @@ struct nfs_renamedata {
struct nfs_fattr new_fattr;
void (*complete)(struct rpc_task *, struct nfs_renamedata *);
long timeout;
+ bool cancelled;
};
struct nfs_access_entry;
@@ -1567,6 +1582,8 @@ struct nfs_rpc_ops {
int (*lookup) (struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *,
struct nfs4_label *);
+ int (*lookupp) (struct inode *, struct nfs_fh *,
+ struct nfs_fattr *, struct nfs4_label *);
int (*access) (struct inode *, struct nfs_access_entry *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
@@ -1585,7 +1602,7 @@ struct nfs_rpc_ops {
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct dentry *, struct rpc_cred *,
- u64, struct page **, unsigned int, int);
+ u64, struct page **, unsigned int, bool);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
@@ -1595,7 +1612,7 @@ struct nfs_rpc_ops {
int (*pathconf) (struct nfs_server *, struct nfs_fh *,
struct nfs_pathconf *);
int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
- int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
+ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool);
int (*pgio_rpc_prepare)(struct rpc_task *,
struct nfs_pgio_header *);
void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aa3cd0878270..8aa01fd859fb 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -6,18 +6,26 @@
#include <linux/sched.h>
#include <asm/irq.h>
+#if defined(CONFIG_HAVE_NMI_WATCHDOG)
+#include <asm/nmi.h>
+#endif
#ifdef CONFIG_LOCKUP_DETECTOR
+void lockup_detector_init(void);
+#else
+static inline void lockup_detector_init(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
-extern unsigned int hardlockup_panic;
-void lockup_detector_init(void);
+extern int soft_watchdog_enabled;
+extern atomic_t watchdog_park_in_progress;
#else
static inline void touch_softlockup_watchdog_sched(void)
{
@@ -31,9 +39,6 @@ static inline void touch_softlockup_watchdog_sync(void)
static inline void touch_all_softlockup_watchdogs(void)
{
}
-static inline void lockup_detector_init(void)
-{
-}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
@@ -61,6 +66,21 @@ static inline void reset_hung_task_detector(void)
#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+#if defined(CONFIG_HARDLOCKUP_DETECTOR)
+extern void hardlockup_detector_disable(void);
+extern unsigned int hardlockup_panic;
+#else
+static inline void hardlockup_detector_disable(void) {}
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
+extern void arch_touch_nmi_watchdog(void);
+#else
+#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void arch_touch_nmi_watchdog(void) {}
+#endif
+#endif
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -68,21 +88,11 @@ static inline void reset_hung_task_detector(void)
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
*/
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-#include <asm/nmi.h>
-extern void touch_nmi_watchdog(void);
-#else
static inline void touch_nmi_watchdog(void)
{
+ arch_touch_nmi_watchdog();
touch_softlockup_watchdog();
}
-#endif
-
-#if defined(CONFIG_HARDLOCKUP_DETECTOR)
-extern void hardlockup_detector_disable(void);
-#else
-static inline void hardlockup_detector_disable(void) {}
-#endif
/*
* Create trigger_all_cpu_backtrace() out of the arch-provided
@@ -139,15 +149,18 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
}
#endif
-#ifdef CONFIG_LOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+#endif
+
+#ifdef CONFIG_LOCKUP_DETECTOR
extern int nmi_watchdog_enabled;
-extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
+extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
-extern atomic_t watchdog_park_in_progress;
+extern int __read_mostly watchdog_suspended;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
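
After this rework touch_nmi_watchdog() is unconditionally a static inline
that calls arch_touch_nmi_watchdog() (a no-op unless an NMI-capable
hardlockup detector is built in) before poking the softlockup watchdog. A
sketch of the classic use in a long busy-wait; the register polling is
hypothetical:

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/nmi.h>

    static void demo_slow_poll(void __iomem *status_reg)
    {
            /* Loop may exceed the watchdog thresholds; keep them reset. */
            while (!(readl(status_reg) & 0x1)) {
                    touch_nmi_watchdog();
                    udelay(100);
            }
    }
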
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index de87ceac110e..609e232c00da 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,6 +19,7 @@
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -106,6 +108,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo)
* @NTB_SPEED_GEN1: Link is trained to gen1 speed.
* @NTB_SPEED_GEN2: Link is trained to gen2 speed.
* @NTB_SPEED_GEN3: Link is trained to gen3 speed.
+ * @NTB_SPEED_GEN4: Link is trained to gen4 speed.
*/
enum ntb_speed {
NTB_SPEED_AUTO = -1,
@@ -113,6 +116,7 @@ enum ntb_speed {
NTB_SPEED_GEN1 = 1,
NTB_SPEED_GEN2 = 2,
NTB_SPEED_GEN3 = 3,
+ NTB_SPEED_GEN4 = 4
};
/**
@@ -140,6 +144,20 @@ enum ntb_width {
};
/**
+ * enum ntb_default_port - NTB default port number
+ * @NTB_PORT_PRI_USD: Default port of the NTB_TOPO_PRI/NTB_TOPO_B2B_USD
+ * topologies
+ * @NTB_PORT_SEC_DSD: Default port of the NTB_TOPO_SEC/NTB_TOPO_B2B_DSD
+ * topologies
+ */
+enum ntb_default_port {
+ NTB_PORT_PRI_USD,
+ NTB_PORT_SEC_DSD
+};
+#define NTB_DEF_PEER_CNT (1)
+#define NTB_DEF_PEER_IDX (0)
+
+/**
* struct ntb_client_ops - ntb client operations
* @probe: Notify client of a new device.
* @remove: Notify client to remove a device.
@@ -162,10 +180,12 @@ static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
* struct ntb_ctx_ops - ntb driver context operations
* @link_event: See ntb_link_event().
* @db_event: See ntb_db_event().
+ * @msg_event: See ntb_msg_event().
*/
struct ntb_ctx_ops {
void (*link_event)(void *ctx);
void (*db_event)(void *ctx, int db_vector);
+ void (*msg_event)(void *ctx);
};
static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
@@ -174,18 +194,27 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
return
/* ops->link_event && */
/* ops->db_event && */
+ /* ops->msg_event && */
1;
}
/**
* struct ntb_dev_ops - ntb device operations
- * @mw_count: See ntb_mw_count().
- * @mw_get_range: See ntb_mw_get_range().
- * @mw_set_trans: See ntb_mw_set_trans().
- * @mw_clear_trans: See ntb_mw_clear_trans().
+ * @port_number: See ntb_port_number().
+ * @peer_port_count: See ntb_peer_port_count().
+ * @peer_port_number: See ntb_peer_port_number().
+ * @peer_port_idx: See ntb_peer_port_idx().
* @link_is_up: See ntb_link_is_up().
* @link_enable: See ntb_link_enable().
* @link_disable: See ntb_link_disable().
+ * @mw_count: See ntb_mw_count().
+ * @mw_get_align: See ntb_mw_get_align().
+ * @mw_set_trans: See ntb_mw_set_trans().
+ * @mw_clear_trans: See ntb_mw_clear_trans().
+ * @peer_mw_count: See ntb_peer_mw_count().
+ * @peer_mw_get_addr: See ntb_peer_mw_get_addr().
+ * @peer_mw_set_trans: See ntb_peer_mw_set_trans().
+ * @peer_mw_clear_trans: See ntb_peer_mw_clear_trans().
* @db_is_unsafe: See ntb_db_is_unsafe().
* @db_valid_mask: See ntb_db_valid_mask().
* @db_vector_count: See ntb_db_vector_count().
@@ -210,22 +239,43 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
* @peer_spad_addr: See ntb_peer_spad_addr().
* @peer_spad_read: See ntb_peer_spad_read().
* @peer_spad_write: See ntb_peer_spad_write().
+ * @msg_count: See ntb_msg_count().
+ * @msg_inbits: See ntb_msg_inbits().
+ * @msg_outbits: See ntb_msg_outbits().
+ * @msg_read_sts: See ntb_msg_read_sts().
+ * @msg_clear_sts: See ntb_msg_clear_sts().
+ * @msg_set_mask: See ntb_msg_set_mask().
+ * @msg_clear_mask: See ntb_msg_clear_mask().
+ * @msg_read: See ntb_msg_read().
+ * @msg_write: See ntb_msg_write().
*/
struct ntb_dev_ops {
- int (*mw_count)(struct ntb_dev *ntb);
- int (*mw_get_range)(struct ntb_dev *ntb, int idx,
- phys_addr_t *base, resource_size_t *size,
- resource_size_t *align, resource_size_t *align_size);
- int (*mw_set_trans)(struct ntb_dev *ntb, int idx,
- dma_addr_t addr, resource_size_t size);
- int (*mw_clear_trans)(struct ntb_dev *ntb, int idx);
+ int (*port_number)(struct ntb_dev *ntb);
+ int (*peer_port_count)(struct ntb_dev *ntb);
+ int (*peer_port_number)(struct ntb_dev *ntb, int pidx);
+ int (*peer_port_idx)(struct ntb_dev *ntb, int port);
- int (*link_is_up)(struct ntb_dev *ntb,
+ u64 (*link_is_up)(struct ntb_dev *ntb,
enum ntb_speed *speed, enum ntb_width *width);
int (*link_enable)(struct ntb_dev *ntb,
enum ntb_speed max_speed, enum ntb_width max_width);
int (*link_disable)(struct ntb_dev *ntb);
+ int (*mw_count)(struct ntb_dev *ntb, int pidx);
+ int (*mw_get_align)(struct ntb_dev *ntb, int pidx, int widx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max);
+ int (*mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
+ dma_addr_t addr, resource_size_t size);
+ int (*mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
+ int (*peer_mw_count)(struct ntb_dev *ntb);
+ int (*peer_mw_get_addr)(struct ntb_dev *ntb, int widx,
+ phys_addr_t *base, resource_size_t *size);
+ int (*peer_mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
+ u64 addr, resource_size_t size);
+ int (*peer_mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
+
int (*db_is_unsafe)(struct ntb_dev *ntb);
u64 (*db_valid_mask)(struct ntb_dev *ntb);
int (*db_vector_count)(struct ntb_dev *ntb);
@@ -252,32 +302,55 @@ struct ntb_dev_ops {
int (*spad_is_unsafe)(struct ntb_dev *ntb);
int (*spad_count)(struct ntb_dev *ntb);
- u32 (*spad_read)(struct ntb_dev *ntb, int idx);
- int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+ u32 (*spad_read)(struct ntb_dev *ntb, int sidx);
+ int (*spad_write)(struct ntb_dev *ntb, int sidx, u32 val);
- int (*peer_spad_addr)(struct ntb_dev *ntb, int idx,
+ int (*peer_spad_addr)(struct ntb_dev *ntb, int pidx, int sidx,
phys_addr_t *spad_addr);
- u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx);
- int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+ u32 (*peer_spad_read)(struct ntb_dev *ntb, int pidx, int sidx);
+ int (*peer_spad_write)(struct ntb_dev *ntb, int pidx, int sidx,
+ u32 val);
+
+ int (*msg_count)(struct ntb_dev *ntb);
+ u64 (*msg_inbits)(struct ntb_dev *ntb);
+ u64 (*msg_outbits)(struct ntb_dev *ntb);
+ u64 (*msg_read_sts)(struct ntb_dev *ntb);
+ int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
+ int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
+ int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
+ int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg);
+ int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg);
};
static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
{
/* commented callbacks are not required: */
return
- ops->mw_count &&
- ops->mw_get_range &&
- ops->mw_set_trans &&
- /* ops->mw_clear_trans && */
+ /* Port operations are required for multiport devices */
+ !ops->peer_port_count == !ops->port_number &&
+ !ops->peer_port_number == !ops->port_number &&
+ !ops->peer_port_idx == !ops->port_number &&
+
+ /* Link operations are required */
ops->link_is_up &&
ops->link_enable &&
ops->link_disable &&
+
+ /* One or both MW interfaces should be implemented */
+ ops->mw_count &&
+ ops->mw_get_align &&
+ (ops->mw_set_trans ||
+ ops->peer_mw_set_trans) &&
+ /* ops->mw_clear_trans && */
+ ops->peer_mw_count &&
+ ops->peer_mw_get_addr &&
+ /* ops->peer_mw_clear_trans && */
+
+ /* Doorbell operations are mostly required */
/* ops->db_is_unsafe && */
ops->db_valid_mask &&
-
/* both set, or both unset */
- (!ops->db_vector_count == !ops->db_vector_mask) &&
-
+ (!ops->db_vector_count == !ops->db_vector_mask) &&
ops->db_read &&
/* ops->db_set && */
ops->db_clear &&
@@ -291,13 +364,24 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* ops->peer_db_read_mask && */
/* ops->peer_db_set_mask && */
/* ops->peer_db_clear_mask && */
- /* ops->spad_is_unsafe && */
- ops->spad_count &&
- ops->spad_read &&
- ops->spad_write &&
- /* ops->peer_spad_addr && */
- /* ops->peer_spad_read && */
- ops->peer_spad_write &&
+
+ /* Scratchpad interface is optional */
+ /* !ops->spad_is_unsafe == !ops->spad_count && */
+ !ops->spad_read == !ops->spad_count &&
+ !ops->spad_write == !ops->spad_count &&
+ /* !ops->peer_spad_addr == !ops->spad_count && */
+ /* !ops->peer_spad_read == !ops->spad_count && */
+ !ops->peer_spad_write == !ops->spad_count &&
+
+ /* Messaging interface is optional */
+ !ops->msg_inbits == !ops->msg_count &&
+ !ops->msg_outbits == !ops->msg_count &&
+ !ops->msg_read_sts == !ops->msg_count &&
+ !ops->msg_clear_sts == !ops->msg_count &&
+ /* !ops->msg_set_mask == !ops->msg_count && */
+ /* !ops->msg_clear_mask == !ops->msg_count && */
+ !ops->msg_read == !ops->msg_count &&
+ !ops->msg_write == !ops->msg_count &&
1;
}
@@ -310,13 +394,12 @@ struct ntb_client {
struct device_driver drv;
const struct ntb_client_ops ops;
};
-
#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
/**
* struct ntb_device - ntb device
* @dev: Linux device object.
- * @pdev: Pci device entry of the ntb.
+ * @pdev: PCI device entry of the ntb.
* @topo: Detected topology of the ntb.
* @ops: See &ntb_dev_ops.
* @ctx: See &ntb_ctx_ops.
@@ -337,7 +420,6 @@ struct ntb_dev {
/* block unregister until device is fully released */
struct completion released;
};
-
#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
/**
@@ -434,86 +516,152 @@ void ntb_link_event(struct ntb_dev *ntb);
* multiple interrupt vectors for doorbells, the vector number indicates which
* vector received the interrupt. The vector number is relative to the first
* vector used for doorbells, starting at zero, and must be less than
- ** ntb_db_vector_count(). The driver may call ntb_db_read() to check which
+ * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
* doorbell bits need service, and ntb_db_vector_mask() to determine which of
* those bits are associated with the vector number.
*/
void ntb_db_event(struct ntb_dev *ntb, int vector);
/**
- * ntb_mw_count() - get the number of memory windows
+ * ntb_msg_event() - notify driver context of a message event
* @ntb: NTB device context.
*
- * Hardware and topology may support a different number of memory windows.
+ * Notify the driver context of a message event. If hardware supports
+ * message registers, this event indicates that a new message arrived in an
+ * incoming message register or that the last sent message couldn't be delivered.
+ * The events can be masked/unmasked by the methods ntb_msg_set_mask() and
+ * ntb_msg_clear_mask().
+ */
+void ntb_msg_event(struct ntb_dev *ntb);
+
+/**
+ * ntb_default_port_number() - get the default local port number
+ * @ntb: NTB device context.
*
- * Return: the number of memory windows.
+ * If the hardware driver doesn't specify a port_number() callback method,
+ * the NTB is assumed to have just two ports. This method then returns the
+ * default local port number for the detected topology.
+ *
+ * NOTE Don't call this method directly. The ntb_port_number() function should
+ * be used instead.
+ *
+ * Return: the default local port number
+ */
+int ntb_default_port_number(struct ntb_dev *ntb);
+
+/**
+ * ntb_default_peer_port_count() - get the default number of peer device ports
+ * @ntb: NTB device context.
+ *
+ * By default the hardware driver supports just one peer device.
+ *
+ * NOTE Don't call this method directly. The ntb_peer_port_count() function
+ * should be used instead.
+ *
+ * Return: the default number of peer ports
+ */
+int ntb_default_peer_port_count(struct ntb_dev *ntb);
+
+/**
+ * ntb_default_peer_port_number() - get the default peer port by given index
+ * @ntb: NTB device context.
+ * @pidx: Peer port index (should not differ from zero).
+ *
+ * By default the hardware driver supports just one peer device, so this
+ * method shall return the corresponding value from enum ntb_default_port.
+ *
+ * NOTE Don't call this method directly. The ntb_peer_port_number() function
+ * should be used instead.
+ *
+ * Return: the peer device port or negative value indicating an error
+ */
+int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx);
+
+/**
+ * ntb_default_peer_port_idx() - get the default peer device port index by
+ * given port number
+ * @ntb: NTB device context.
+ * @port: Peer port number (should be one of enum ntb_default_port).
+ *
+ * By default the hardware driver supports just one peer device, so as long
+ * as the specified port argument matches a peer port from
+ * enum ntb_default_port, the return value shall be zero.
+ *
+ * NOTE Don't call this method directly. The ntb_peer_port_idx() function
+ * should be used instead.
+ *
+ * Return: the peer port index or negative value indicating an error
+ */
+int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port);
+
+/**
+ * ntb_port_number() - get the local port number
+ * @ntb: NTB device context.
+ *
+ * Hardware must support at least a simple two-port ntb connection.
+ *
+ * Return: the local port number
*/
-static inline int ntb_mw_count(struct ntb_dev *ntb)
+static inline int ntb_port_number(struct ntb_dev *ntb)
{
- return ntb->ops->mw_count(ntb);
+ if (!ntb->ops->port_number)
+ return ntb_default_port_number(ntb);
+
+ return ntb->ops->port_number(ntb);
}
/**
- * ntb_mw_get_range() - get the range of a memory window
+ * ntb_peer_port_count() - get the number of peer device ports
* @ntb: NTB device context.
- * @idx: Memory window number.
- * @base: OUT - the base address for mapping the memory window
- * @size: OUT - the size for mapping the memory window
- * @align: OUT - the base alignment for translating the memory window
- * @align_size: OUT - the size alignment for translating the memory window
*
- * Get the range of a memory window. NULL may be given for any output
- * parameter if the value is not needed. The base and size may be used for
- * mapping the memory window, to access the peer memory. The alignment and
- * size may be used for translating the memory window, for the peer to access
- * memory on the local system.
+ * Hardware may support access to the memory of several remote domains
+ * over multi-port NTB devices. This method returns the number of peers
+ * the local device can share memory with.
*
- * Return: Zero on success, otherwise an error number.
+ * Return: the number of peer ports
*/
-static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx,
- phys_addr_t *base, resource_size_t *size,
- resource_size_t *align, resource_size_t *align_size)
+static inline int ntb_peer_port_count(struct ntb_dev *ntb)
{
- return ntb->ops->mw_get_range(ntb, idx, base, size,
- align, align_size);
+ if (!ntb->ops->peer_port_count)
+ return ntb_default_peer_port_count(ntb);
+
+ return ntb->ops->peer_port_count(ntb);
}
/**
- * ntb_mw_set_trans() - set the translation of a memory window
+ * ntb_peer_port_number() - get the peer port by given index
* @ntb: NTB device context.
- * @idx: Memory window number.
- * @addr: The dma address local memory to expose to the peer.
- * @size: The size of the local memory to expose to the peer.
+ * @pidx: Peer port index.
*
- * Set the translation of a memory window. The peer may access local memory
- * through the window starting at the address, up to the size. The address
- * must be aligned to the alignment specified by ntb_mw_get_range(). The size
- * must be aligned to the size alignment specified by ntb_mw_get_range().
+ * Peer ports are contiguously enumerated by the NTB API logic, so this
+ * method lets one retrieve the real port number by its index.
*
- * Return: Zero on success, otherwise an error number.
+ * Return: the peer device port or negative value indicating an error
*/
-static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
- dma_addr_t addr, resource_size_t size)
+static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
{
- return ntb->ops->mw_set_trans(ntb, idx, addr, size);
+ if (!ntb->ops->peer_port_number)
+ return ntb_default_peer_port_number(ntb, pidx);
+
+ return ntb->ops->peer_port_number(ntb, pidx);
}
/**
- * ntb_mw_clear_trans() - clear the translation of a memory window
+ * ntb_peer_port_idx() - get the peer device port index by given port number
* @ntb: NTB device context.
- * @idx: Memory window number.
+ * @port: Peer port number.
*
- * Clear the translation of a memory window. The peer may no longer access
- * local memory through the window.
+ * Inverse operation of ntb_peer_port_number(), so one can get the port
+ * index for a specified port number.
*
- * Return: Zero on success, otherwise an error number.
+ * Return: the peer port index or negative value indicating an error
*/
-static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
+static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port)
{
- if (!ntb->ops->mw_clear_trans)
- return ntb->ops->mw_set_trans(ntb, idx, 0, 0);
+ if (!ntb->ops->peer_port_idx)
+ return ntb_default_peer_port_idx(ntb, port);
- return ntb->ops->mw_clear_trans(ntb, idx);
+ return ntb->ops->peer_port_idx(ntb, port);
}
/**
@@ -526,25 +674,26 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
* state once after every link event. It is safe to query the link state in
* the context of the link event callback.
*
- * Return: One if the link is up, zero if the link is down, otherwise a
- * negative value indicating the error number.
+ * Return: bitfield of the indexed ports' link state: a bit is set/cleared
+ * if the corresponding link is up/down respectively.
*/
-static inline int ntb_link_is_up(struct ntb_dev *ntb,
+static inline u64 ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed, enum ntb_width *width)
{
return ntb->ops->link_is_up(ntb, speed, width);
}
/**
- * ntb_link_enable() - enable the link on the secondary side of the ntb
+ * ntb_link_enable() - enable the local port ntb connection
* @ntb: NTB device context.
* @max_speed: The maximum link speed expressed as PCIe generation number.
* @max_width: The maximum link width expressed as the number of PCIe lanes.
*
- * Enable the link on the secondary side of the ntb. This can only be done
- * from the primary side of the ntb in primary or b2b topology. The ntb device
- * should train the link to its maximum speed and width, or the requested speed
- * and width, whichever is smaller, if supported.
+ * Enable the NTB/PCIe link on the local or remote (for bridge-to-bridge
+ * topology) side of the bridge. If supported, the ntb device should train
+ * the link to its maximum speed and width, or the requested speed and width,
+ * whichever is smaller. Some hardware doesn't support PCIe link training, in
+ * which case the last two arguments are ignored.
*
* Return: Zero on success, otherwise an error number.
*/
@@ -556,14 +705,14 @@ static inline int ntb_link_enable(struct ntb_dev *ntb,
}
/**
- * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * ntb_link_disable() - disable the local port ntb connection
* @ntb: NTB device context.
*
- * Disable the link on the secondary side of the ntb. This can only be
- * done from the primary side of the ntb in primary or b2b topology. The ntb
- * device should disable the link. Returning from this call must indicate that
- * a barrier has passed, though with no more writes may pass in either
- * direction across the link, except if this call returns an error number.
+ * Disable the link on the local or remote (for b2b topology) side of the
+ * ntb. The ntb device should disable the link. Returning from this call
+ * must indicate that a barrier has passed: no more writes may pass in
+ * either direction across the link, unless this call returns an error
+ * number.
*
* Return: Zero on success, otherwise an error number.
*/
@@ -573,6 +722,183 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
}
/**
+ * ntb_mw_count() - get the number of inbound memory windows, which could
+ * be created for a specified peer device
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ * Moreover, different peer devices can support different numbers of memory
+ * windows. Simply put, this method returns the number of possible inbound
+ * memory windows to share with the specified peer device.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ return ntb->ops->mw_count(ntb, pidx);
+}
+
+/**
+ * ntb_mw_get_align() - get the restriction parameters of inbound memory window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ * @addr_align: OUT - the base alignment for translating the memory window
+ * @size_align: OUT - the size alignment for translating the memory window
+ * @size_max: OUT - the maximum size of the memory window
+ *
+ * Get the alignments of an inbound memory window with specified index.
+ * NULL may be given for any output parameter if the value is not needed.
+ * The alignment and size parameters may be used for allocation of proper
+ * shared memory.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
+ size_max);
+}
+
+/**
+ * ntb_mw_set_trans() - set the translation of an inbound memory window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ * @addr: The dma address of local memory to expose to the peer.
+ * @size: The size of the local memory to expose to the peer.
+ *
+ * Set the translation of a memory window. The peer may access local memory
+ * through the window starting at the address, up to the size. The address
+ * and size must be aligned in compliance with restrictions of
+ * ntb_mw_get_align(). The region size should not exceed the size_max parameter
+ * of that method.
+ *
+ * This method may not be implemented due to the hardware specific memory
+ * windows interface.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ dma_addr_t addr, resource_size_t size)
+{
+ if (!ntb->ops->mw_set_trans)
+ return 0;
+
+ return ntb->ops->mw_set_trans(ntb, pidx, widx, addr, size);
+}
+
+/**
+ * ntb_mw_clear_trans() - clear the translation address of an inbound memory
+ * window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ *
+ * Clear the translation of an inbound memory window. The peer may no longer
+ * access local memory through the window.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int pidx, int widx)
+{
+ if (!ntb->ops->mw_clear_trans)
+ return ntb_mw_set_trans(ntb, pidx, widx, 0, 0);
+
+ return ntb->ops->mw_clear_trans(ntb, pidx, widx);
+}
+
+/**
+ * ntb_peer_mw_count() - get the number of outbound memory windows, which could
+ * be mapped to access a shared memory
+ * @ntb: NTB device context.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ * This method returns the number of outbound memory windows supported by
+ * the local device.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb->ops->peer_mw_count(ntb);
+}
+
+/**
+ * ntb_peer_mw_get_addr() - get map address of an outbound memory window
+ * @ntb: NTB device context.
+ * @widx: Memory window index (within ntb_peer_mw_count() return value).
+ * @base: OUT - the base address of mapping region.
+ * @size: OUT - the size of mapping region.
+ *
+ * Get base and size of memory region to map. NULL may be given for any output
+ * parameter if the value is not needed. The base and size may be used for
+ * mapping the memory window, to access the peer memory.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ return ntb->ops->peer_mw_get_addr(ntb, widx, base, size);
+}
+
+/**
+ * ntb_peer_mw_set_trans() - set a translation address of a memory window
+ * retrieved from a peer device
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device the translation address received from.
+ * @widx: Memory window index.
+ * @addr: The dma address of the shared memory to access.
+ * @size: The size of the shared memory to access.
+ *
+ * Set the translation of an outbound memory window. The local device may
+ * access shared memory allocated by the peer device that sent the address.
+ *
+ * This method may not be implemented due to the hardware-specific memory
+ * windows interface, so a translation address can only be set on the side
+ * where the shared memory (inbound memory windows) is allocated.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ u64 addr, resource_size_t size)
+{
+ if (!ntb->ops->peer_mw_set_trans)
+ return 0;
+
+ return ntb->ops->peer_mw_set_trans(ntb, pidx, widx, addr, size);
+}
+
+/**
+ * ntb_peer_mw_clear_trans() - clear the translation address of an outbound
+ * memory window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ *
+ * Clear the translation of an outbound memory window. The local device may no
+ * longer access a shared memory through the window.
+ *
+ * This method may not be implemented due to the hardware specific memory
+ * windows interface.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
+ int widx)
+{
+ if (!ntb->ops->peer_mw_clear_trans)
+ return ntb_peer_mw_set_trans(ntb, pidx, widx, 0, 0);
+
+ return ntb->ops->peer_mw_clear_trans(ntb, pidx, widx);
+}
+
+/**
* ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
* @ntb: NTB device context.
*
@@ -900,47 +1226,58 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
* @ntb: NTB device context.
*
* Hardware and topology may support a different number of scratchpads.
+ * It must, however, be the same for all ports of an NTB device.
*
* Return: the number of scratchpads.
*/
static inline int ntb_spad_count(struct ntb_dev *ntb)
{
+ if (!ntb->ops->spad_count)
+ return 0;
+
return ntb->ops->spad_count(ntb);
}
/**
* ntb_spad_read() - read the local scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @sidx: Scratchpad index.
*
* Read the local scratchpad register, and return the value.
*
* Return: The value of the local scratchpad register.
*/
-static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx)
+static inline u32 ntb_spad_read(struct ntb_dev *ntb, int sidx)
{
- return ntb->ops->spad_read(ntb, idx);
+ if (!ntb->ops->spad_read)
+ return ~(u32)0;
+
+ return ntb->ops->spad_read(ntb, sidx);
}
/**
* ntb_spad_write() - write the local scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @sidx: Scratchpad index.
* @val: Scratchpad value.
*
* Write the value to the local scratchpad register.
*
* Return: Zero on success, otherwise an error number.
*/
-static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
{
- return ntb->ops->spad_write(ntb, idx, val);
+ if (!ntb->ops->spad_write)
+ return -EINVAL;
+
+ return ntb->ops->spad_write(ntb, sidx, val);
}
/**
* ntb_peer_spad_addr() - address of the peer scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @pidx: Port index of peer device.
+ * @sidx: Scratchpad index.
* @spad_addr: OUT - The address of the peer scratchpad register.
*
* Return the address of the peer scratchpad register. This may be used, for
@@ -948,45 +1285,213 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
*
* Return: Zero on success, otherwise an error number.
*/
-static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
phys_addr_t *spad_addr)
{
if (!ntb->ops->peer_spad_addr)
return -EINVAL;
- return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
+ return ntb->ops->peer_spad_addr(ntb, pidx, sidx, spad_addr);
}
/**
* ntb_peer_spad_read() - read the peer scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @pidx: Port index of peer device.
+ * @sidx: Scratchpad index.
*
* Read the peer scratchpad register, and return the value.
*
* Return: The value of the peer scratchpad register.
*/
-static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
if (!ntb->ops->peer_spad_read)
- return 0;
+ return ~(u32)0;
- return ntb->ops->peer_spad_read(ntb, idx);
+ return ntb->ops->peer_spad_read(ntb, pidx, sidx);
}
/**
* ntb_peer_spad_write() - write the peer scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @pidx: Port index of peer device.
+ * @sidx: Scratchpad index.
* @val: Scratchpad value.
*
* Write the value to the peer scratchpad register.
*
* Return: Zero on success, otherwise an error number.
*/
-static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
+ u32 val)
+{
+ if (!ntb->ops->peer_spad_write)
+ return -EINVAL;
+
+ return ntb->ops->peer_spad_write(ntb, pidx, sidx, val);
+}
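
A hedged sketch of the multi-port handshake the new pidx-aware signatures enable; the port and scratchpad indices are made up for illustration:

/* Illustrative only: post a value for peer 'pidx' and check for its reply. */
static int spad_handshake(struct ntb_dev *ntb, int pidx)
{
	int rc;

	if (ntb_spad_count(ntb) < 2)
		return -ENODEV;

	rc = ntb_peer_spad_write(ntb, pidx, 0, 0x600df00d);
	if (rc)
		return rc;

	/* The peer is assumed to answer in our local scratchpad 1. */
	return ntb_spad_read(ntb, 1) == ~(u32)0 ? -EAGAIN : 0;
}
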
+
+/**
+ * ntb_msg_count() - get the number of message registers
+ * @ntb: NTB device context.
+ *
+ * Hardware may support a different number of message registers.
+ *
+ * Return: the number of message registers.
+ */
+static inline int ntb_msg_count(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->msg_count)
+ return 0;
+
+ return ntb->ops->msg_count(ntb);
+}
+
+/**
+ * ntb_msg_inbits() - get a bitfield of inbound message registers status
+ * @ntb: NTB device context.
+ *
+ * The method returns the bitfield of the status and mask registers that is
+ * related to the inbound message registers.
+ *
+ * Return: bitfield of inbound message registers.
+ */
+static inline u64 ntb_msg_inbits(struct ntb_dev *ntb)
{
- return ntb->ops->peer_spad_write(ntb, idx, val);
+ if (!ntb->ops->msg_inbits)
+ return 0;
+
+ return ntb->ops->msg_inbits(ntb);
+}
+
+/**
+ * ntb_msg_outbits() - get a bitfield of outbound message registers status
+ * @ntb: NTB device context.
+ *
+ * The method returns the bitfield of the status and mask registers that is
+ * related to the outbound message registers.
+ *
+ * Return: bitfield of outbound message registers.
+ */
+static inline u64 ntb_msg_outbits(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->msg_outbits)
+ return 0;
+
+ return ntb->ops->msg_outbits(ntb);
+}
+
+/**
+ * ntb_msg_read_sts() - read the message registers status
+ * @ntb: NTB device context.
+ *
+ * Read the status of the message registers. Bits related to inbound and
+ * outbound message registers can be filtered using the masks retrieved from
+ * ntb_msg_inbits() and ntb_msg_outbits().
+ *
+ * Return: status bits of message registers
+ */
+static inline u64 ntb_msg_read_sts(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->msg_read_sts)
+ return 0;
+
+ return ntb->ops->msg_read_sts(ntb);
+}
+
+/**
+ * ntb_msg_clear_sts() - clear status bits of message registers
+ * @ntb: NTB device context.
+ * @sts_bits: Status bits to clear.
+ *
+ * Clear bits in the status register.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
+{
+ if (!ntb->ops->msg_clear_sts)
+ return -EINVAL;
+
+ return ntb->ops->msg_clear_sts(ntb, sts_bits);
+}
+
+/**
+ * ntb_msg_set_mask() - set mask of message register status bits
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits.
+ *
+ * Mask the message register status bits so they do not raise the message event.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+ if (!ntb->ops->msg_set_mask)
+ return -EINVAL;
+
+ return ntb->ops->msg_set_mask(ntb, mask_bits);
+}
+
+/**
+ * ntb_msg_clear_mask() - clear message registers mask
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits to clear.
+ *
+ * Clear bits in the message event mask register.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+ if (!ntb->ops->msg_clear_mask)
+ return -EINVAL;
+
+ return ntb->ops->msg_clear_mask(ntb, mask_bits);
+}
+
+/**
+ * ntb_msg_read() - read message register with specified index
+ * @ntb: NTB device context.
+ * @midx: Message register index
+ * @pidx: OUT - Port index of the peer device the message was retrieved from
+ * @msg: OUT - Message data
+ *
+ * Read data from the specified message register. The source port index of
+ * the message is retrieved as well.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
+ u32 *msg)
+{
+ if (!ntb->ops->msg_read)
+ return -EINVAL;
+
+ return ntb->ops->msg_read(ntb, midx, pidx, msg);
+}
+
+/**
+ * ntb_msg_write() - write data to the specified message register
+ * @ntb: NTB device context.
+ * @midx: Message register index
+ * @pidx: Port index of the peer device the message is being sent to
+ * @msg: Data to send
+ *
+ * Send data to the specified peer device using the given message register.
+ * A message event can be raised if the midx register isn't empty when this
+ * method is called and the corresponding interrupt isn't masked.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx,
+ u32 msg)
+{
+ if (!ntb->ops->msg_write)
+ return -EINVAL;
+
+ return ntb->ops->msg_write(ntb, midx, pidx, msg);
}
#endif
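
An illustrative sketch (not part of the patch) of how the message-register helpers compose in a client's message-event handler; it assumes one status bit per register index, which is hardware-specific:

/* Illustrative only: drain pending inbound messages after a message event. */
static void drain_inbound_msgs(struct ntb_dev *ntb)
{
	u64 sts = ntb_msg_read_sts(ntb) & ntb_msg_inbits(ntb);
	int midx, pidx;
	u32 data;

	for (midx = 0; midx < ntb_msg_count(ntb); midx++) {
		if (!(sts & BIT_ULL(midx)))
			continue;
		if (!ntb_msg_read(ntb, midx, &pidx, &data))
			pr_info("msg %d from port %d: %#x\n", midx, pidx, data);
	}
	ntb_msg_clear_sts(ntb, sts);
}
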
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index cd93416d762e..497706f5adca 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -12,6 +12,9 @@
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/err.h>
+#include <linux/errno.h>
+
struct nvmem_device;
struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
diff --git a/include/linux/once.h b/include/linux/once.h
index 285f12cb40e6..9c98aaa87cbc 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -53,5 +53,7 @@ void __do_once_done(bool *done, struct static_key *once_key,
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
+#define get_random_once_wait(buf, nbytes) \
+ DO_ONCE(get_random_bytes_wait, (buf), (nbytes))
#endif /* _LINUX_ONCE_H */
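
A hedged sketch of the intended use: like get_random_once(), the _wait variant initializes a static buffer exactly once, but blocks until the CRNG is ready. The surrounding function is hypothetical:

/* Illustrative only: lazily derive a key, sleeping until the CRNG is seeded. */
static u8 session_key[16];

static void get_session_key(u8 *out)
{
	/* Note: DO_ONCE() discards get_random_bytes_wait()'s return value. */
	get_random_once_wait(session_key, sizeof(session_key));
	memcpy(out, session_key, sizeof(session_key));
}
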
diff --git a/include/linux/path.h b/include/linux/path.h
index d1372186f431..cde895cc4af4 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -7,7 +7,7 @@ struct vfsmount;
struct path {
struct vfsmount *mnt;
struct dentry *dentry;
-};
+} __randomize_layout;
extern void path_get(const struct path *);
extern void path_put(const struct path *);
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index c2a989dee876..b09136f88cf4 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -52,7 +52,7 @@ struct pid_namespace {
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
-};
+} __randomize_layout;
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h
index 7fa1fbefc3f2..cc7554ae6e8b 100644
--- a/include/linux/platform_data/usb-ohci-s3c2410.h
+++ b/include/linux/platform_data/usb-ohci-s3c2410.h
@@ -31,7 +31,7 @@ struct s3c2410_hcd_info {
void (*report_oc)(struct s3c2410_hcd_info *, int ports);
};
-static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
+static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
{
if (info->report_oc != NULL) {
(info->report_oc)(info, ports);
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 58ab28d81fc2..06844b54dfc1 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -21,7 +21,7 @@ struct proc_ns_operations {
int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
struct user_namespace *(*owner)(struct ns_common *ns);
struct ns_common *(*get_parent)(struct ns_common *ns);
-};
+} __randomize_layout;
extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
diff --git a/include/linux/random.h b/include/linux/random.h
index ed5c3838780d..eafea6a09361 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
+extern int wait_for_random_bytes(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
@@ -57,6 +58,52 @@ static inline unsigned long get_random_long(void)
#endif
}
+/*
+ * On 64-bit architectures, protect against non-terminated C string overflows
+ * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ */
+#ifdef CONFIG_64BIT
+# ifdef __LITTLE_ENDIAN
+# define CANARY_MASK 0xffffffffffffff00UL
+# else /* big endian, 64 bits: */
+# define CANARY_MASK 0x00ffffffffffffffUL
+# endif
+#else /* 32 bits: */
+# define CANARY_MASK 0xffffffffUL
+#endif
+
+static inline unsigned long get_random_canary(void)
+{
+ unsigned long val = get_random_long();
+
+ return val & CANARY_MASK;
+}
+
+/*
+ * Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+ * Returns the result of the call to wait_for_random_bytes().
+ */
+static inline int get_random_bytes_wait(void *buf, int nbytes)
+{
+ int ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
+ get_random_bytes(buf, nbytes);
+ return 0;
+}
+
+#define declare_get_random_var_wait(var) \
+ static inline int get_random_ ## var ## _wait(var *out) { \
+ int ret = wait_for_random_bytes(); \
+ if (unlikely(ret)) \
+ return ret; \
+ *out = get_random_ ## var(); \
+ return 0; \
+ }
+declare_get_random_var_wait(u32)
+declare_get_random_var_wait(u64)
+declare_get_random_var_wait(int)
+declare_get_random_var_wait(long)
+#undef declare_get_random_var_wait
+
unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
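
The declare_get_random_var_wait() block above expands into get_random_u32_wait(), get_random_u64_wait(), get_random_int_wait() and get_random_long_wait(). A hedged sketch of a caller (the function name is hypothetical):

/* Illustrative only: pick a random value, propagating an interrupted wait. */
static int pick_initial_seq(u32 *seq)
{
	int rc = get_random_u32_wait(seq);

	if (rc)	/* wait_for_random_bytes() was interrupted by a signal */
		return rc;
	*seq &= 0x7fffffff;
	return 0;
}
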
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b693adac853b..0a0f0d14a5fb 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/nvmem-provider.h>
#include <uapi/linux/rtc.h>
extern int rtc_month_days(unsigned int month, unsigned int year);
@@ -32,17 +33,11 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
}
-/**
- * Deprecated. Use rtc_time64_to_tm().
- */
static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
{
rtc_time64_to_tm(time, tm);
}
-/**
- * Deprecated. Use rtc_tm_to_time64().
- */
static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
{
*time = rtc_tm_to_time64(tm);
@@ -116,7 +111,6 @@ struct rtc_device {
struct module *owner;
int id;
- char name[RTC_DEVICE_NAME_SIZE];
const struct rtc_class_ops *ops;
struct mutex ops_lock;
@@ -143,6 +137,14 @@ struct rtc_device {
/* Some hardware can't support UIE mode */
int uie_unsupported;
+ bool registered;
+
+ struct nvmem_config *nvmem_config;
+ struct nvmem_device *nvmem;
+ /* Old ABI support */
+ bool nvram_old_abi;
+ struct bin_attribute *nvram;
+
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
struct work_struct uie_task;
struct timer_list uie_timer;
@@ -164,6 +166,8 @@ extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
const struct rtc_class_ops *ops,
struct module *owner);
+struct rtc_device *devm_rtc_allocate_device(struct device *dev);
+int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern void rtc_device_unregister(struct rtc_device *rtc);
extern void devm_rtc_device_unregister(struct device *dev,
struct rtc_device *rtc);
@@ -219,6 +223,9 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
+#define rtc_register_device(device) \
+ __rtc_register_device(THIS_MODULE, device)
+
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
extern int rtc_hctosys_ret;
#else
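
These additions change the preferred RTC registration flow from a single devm_rtc_device_register() call to allocate-then-register, so a driver can fill in ops and nvmem details in between. A hedged probe() sketch; the foo_* names are hypothetical:

/* Illustrative only: the two-step registration a driver would now use. */
static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &foo_rtc_ops;	/* configure before registering */
	rtc->nvram_old_abi = true;	/* opt into the old nvram ABI */

	/* Expands to __rtc_register_device(THIS_MODULE, rtc). */
	return rtc_register_device(rtc);
}
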
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 20814b7d7d70..8337e2db0bb2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -426,7 +426,7 @@ struct sched_rt_entity {
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
-};
+} __randomize_layout;
struct sched_dl_entity {
struct rb_node rb_node;
@@ -526,6 +526,13 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
+
+ /*
+ * This begins the randomizable portion of task_struct. Only
+ * scheduling-critical items should be added above here.
+ */
+ randomized_struct_fields_start
+
void *stack;
atomic_t usage;
/* Per task flags (PF_*), defined further below: */
@@ -974,6 +981,7 @@ struct task_struct {
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
+ unsigned int fail_nth;
#endif
/*
* When (nr_dirtied >= nr_dirtied_pause), it's time to call
@@ -1078,6 +1086,13 @@ struct task_struct {
/* Used by LSM modules for access restriction: */
void *security;
#endif
+
+ /*
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
+ */
+ randomized_struct_fields_end
+
/* CPU-specific state of this task: */
struct thread_struct thread;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c06d63b3a583..2a0dd40b15db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -222,7 +222,7 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
-};
+} __randomize_layout;
/*
* Bits in flags field of signal_struct.
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 9edec926e9d9..de2deb8676bd 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -8,11 +8,29 @@
struct task_struct;
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ int semval; /* current value */
+ /*
+ * PID of the process that last modified the semaphore. For
+ * Linux, specifically these are:
+ * - semop
+ * - semctl, via SETVAL and SETALL.
+ * - at task exit when performing undo adjustments (see exit_sem).
+ */
+ int sempid;
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
+ struct list_head pending_alter; /* pending single-sop operations */
+ /* that alter the semaphore */
+ struct list_head pending_const; /* pending single-sop operations */
+ /* that do not alter the semaphore */
+ time_t sem_otime; /* candidate for sem_otime */
+} ____cacheline_aligned_in_smp;
+
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
- time_t sem_ctime; /* last change time */
- struct sem *sem_base; /* ptr to first semaphore in array */
+ time_t sem_ctime; /* create/last semctl() time */
struct list_head pending_alter; /* pending operations */
/* that alter the array */
struct list_head pending_const; /* pending complex operations */
@@ -21,7 +39,9 @@ struct sem_array {
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
unsigned int use_global_lock;/* >0: global lock required */
-};
+
+ struct sem sems[];
+} __randomize_layout;
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 04e881829625..0fb7061ec54c 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -22,7 +22,7 @@ struct shmid_kernel /* private to the kernel */
/* The task created the shm object. NULL if the task is dead. */
struct task_struct *shm_creator;
struct list_head shm_clist; /* list by creator */
-};
+} __randomize_layout;
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 04a7f7993e67..41473df6dfb0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -471,7 +471,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
*
* %__GFP_NOWARN - If allocation fails, don't issue any warnings.
*
- * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
+ * eventually.
*
* There are other flags available as well, but these are not intended
* for general use, and so are not documented here. For a full list of
diff --git a/include/linux/string.h b/include/linux/string.h
index 7439d83eaa33..a467e617eeb0 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -137,6 +137,7 @@ extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
@@ -193,4 +194,205 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
+#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+
+#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_strncpy(p, q, size);
+}
+
+__FORTIFY_INLINE char *strcat(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+{
+ size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ if (__builtin_constant_p(len) && len >= p_size)
+ __write_overflow();
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __builtin_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return ret;
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __builtin_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memset(p, c, size);
+}
+
+__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcpy(p, q, size);
+}
+
+__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memmove(p, q, size);
+}
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __read_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+
+/* defined after fortified strlen and memcpy to reuse them */
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strcpy(p, q);
+ memcpy(p, q, strlen(q) + 1);
+ return p;
+}
+
+#endif
+
#endif /* _LINUX_STRING_H_ */
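
With CONFIG_FORTIFY_SOURCE enabled, the inlines above turn overflows with compile-time-known sizes into build errors and catch the rest at run time via fortify_panic(). An intentionally broken, illustrative caller:

/* Illustrative only: both lines fail to compile under FORTIFY_SOURCE. */
static void fortify_demo(const void *src)
{
	char buf[8];

	memcpy(buf, src, 16);	/* __write_overflow(): 16 > sizeof(buf) */
	memset(buf, 0, 9);	/* __write_overflow(): 9 > sizeof(buf) */
}
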
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6095ecba0dde..55ef67bea06b 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -39,7 +39,7 @@ struct rpc_clnt {
struct list_head cl_tasks; /* List of tasks */
spinlock_t cl_lock; /* spinlock */
struct rpc_xprt __rcu * cl_xprt; /* transport */
- struct rpc_procinfo * cl_procinfo; /* procedure info */
+ const struct rpc_procinfo *cl_procinfo; /* procedure info */
u32 cl_prog, /* RPC program number */
cl_vers, /* RPC version number */
cl_maxproc; /* max procedure number */
@@ -87,7 +87,8 @@ struct rpc_program {
struct rpc_version {
u32 number; /* version number */
unsigned int nrprocs; /* number of procs */
- struct rpc_procinfo * procs; /* procedure array */
+ const struct rpc_procinfo *procs; /* procedure array */
+ unsigned int *counts; /* call counts */
};
/*
@@ -99,7 +100,6 @@ struct rpc_procinfo {
kxdrdproc_t p_decode; /* XDR decode function */
unsigned int p_arglen; /* argument hdr length (u32) */
unsigned int p_replen; /* reply hdr length (u32) */
- unsigned int p_count; /* call count */
unsigned int p_timer; /* Which RTT timer to use */
u32 p_statidx; /* Which procedure to account */
const char * p_name; /* name of procedure */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 9d7529ffc4ce..50a99a117da7 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -22,7 +22,7 @@
*/
struct rpc_procinfo;
struct rpc_message {
- struct rpc_procinfo * rpc_proc; /* Procedure information */
+ const struct rpc_procinfo *rpc_proc; /* Procedure information */
void * rpc_argp; /* Arguments */
void * rpc_resp; /* Result */
struct rpc_cred * rpc_cred; /* Credentials */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 11cef5a7bc87..a3f8af9bd543 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -237,7 +237,7 @@ struct svc_rqst {
struct svc_serv * rq_server; /* RPC service definition */
struct svc_pool * rq_pool; /* thread pool */
- struct svc_procedure * rq_procinfo; /* procedure info */
+ const struct svc_procedure *rq_procinfo;/* procedure info */
struct auth_ops * rq_authop; /* authentication flavour */
struct svc_cred rq_cred; /* auth info */
void * rq_xprt_ctxt; /* transport specific context ptr */
@@ -246,7 +246,7 @@ struct svc_rqst {
size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
struct xdr_buf rq_res;
- struct page * rq_pages[RPCSVC_MAXPAGES];
+ struct page *rq_pages[RPCSVC_MAXPAGES + 1];
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
@@ -384,7 +384,7 @@ struct svc_program {
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
unsigned int pg_nvers; /* number of versions */
- struct svc_version ** pg_vers; /* version array */
+ const struct svc_version **pg_vers; /* version array */
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
struct svc_stat * pg_stats; /* rpc statistics */
@@ -397,7 +397,8 @@ struct svc_program {
struct svc_version {
u32 vs_vers; /* version number */
u32 vs_nproc; /* number of procedures */
- struct svc_procedure * vs_proc; /* per-procedure info */
+ const struct svc_procedure *vs_proc; /* per-procedure info */
+ unsigned int *vs_count; /* call counts */
u32 vs_xdrsize; /* xdrsize needed for this version */
/* Don't register with rpcbind */
@@ -419,15 +420,17 @@ struct svc_version {
/*
* RPC procedure info
*/
-typedef __be32 (*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
- svc_procfunc pc_func; /* process the request */
- kxdrproc_t pc_decode; /* XDR decode args */
- kxdrproc_t pc_encode; /* XDR encode result */
- kxdrproc_t pc_release; /* XDR free result */
+ /* process the request: */
+ __be32 (*pc_func)(struct svc_rqst *);
+ /* XDR decode args: */
+ int (*pc_decode)(struct svc_rqst *, __be32 *data);
+ /* XDR encode result: */
+ int (*pc_encode)(struct svc_rqst *, __be32 *data);
+ /* XDR free result: */
+ void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
unsigned int pc_ressize; /* result struct size */
- unsigned int pc_count; /* call count */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
};
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f3787d800ba4..995c6fe9ee90 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -77,46 +77,25 @@ extern atomic_t rdma_stat_sq_prod;
*/
struct svc_rdma_op_ctxt {
struct list_head list;
- struct svc_rdma_op_ctxt *read_hdr;
- struct svc_rdma_fastreg_mr *frmr;
- int hdr_count;
struct xdr_buf arg;
struct ib_cqe cqe;
- struct ib_cqe reg_cqe;
- struct ib_cqe inv_cqe;
u32 byte_len;
- u32 position;
struct svcxprt_rdma *xprt;
- unsigned long flags;
enum dma_data_direction direction;
int count;
unsigned int mapped_sges;
+ int hdr_count;
struct ib_send_wr send_wr;
struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
struct page *pages[RPCSVC_MAXPAGES];
};
-struct svc_rdma_fastreg_mr {
- struct ib_mr *mr;
- struct scatterlist *sg;
- int sg_nents;
- unsigned long access_flags;
- enum dma_data_direction direction;
- struct list_head frmr_list;
-};
-
-#define RDMACTXT_F_LAST_CTXT 2
-
-#define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */
-#define SVCRDMA_DEVCAP_READ_W_INV 2 /* read w/ invalidate */
-
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
int sc_max_sge;
- int sc_max_sge_rd; /* max sge for read target */
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
atomic_t sc_sq_avail; /* SQEs ready to be consumed */
@@ -141,14 +120,6 @@ struct svcxprt_rdma {
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
struct ib_cq *sc_sq_cq;
- int (*sc_reader)(struct svcxprt_rdma *,
- struct svc_rqst *,
- struct svc_rdma_op_ctxt *,
- int *, u32 *, u32, u32, u64, bool);
- u32 sc_dev_caps; /* distilled device caps */
- unsigned int sc_frmr_pg_list_len;
- struct list_head sc_frmr_q;
- spinlock_t sc_frmr_q_lock;
spinlock_t sc_lock; /* transport lock */
@@ -185,20 +156,14 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
__be32 *rdma_resp,
struct xdr_buf *rcvbuf);
-/* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
-
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
-extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
- struct svc_rdma_op_ctxt *, int *, u32 *,
- u32, u32, u64, bool);
-extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
- struct svc_rdma_op_ctxt *, int *, u32 *,
- u32, u32, u64, bool);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
+extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_op_ctxt *head, __be32 *p);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
__be32 *wr_ch, struct xdr_buf *xdr);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
@@ -226,9 +191,6 @@ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
-extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
-extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
- struct svc_rdma_fastreg_mr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 054c8cde18f3..261b48a2701d 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -17,6 +17,8 @@
#include <asm/unaligned.h>
#include <linux/scatterlist.h>
+struct rpc_rqst;
+
/*
* Buffer adjustment
*/
@@ -33,13 +35,6 @@ struct xdr_netobj {
};
/*
- * This is the legacy generic XDR function. rqstp is either a rpc_rqst
- * (client side) or svc_rqst pointer (server side).
- * Encode functions always assume there's enough room in the buffer.
- */
-typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
-
-/*
* Basic structure for transmission/reception of a client XDR message.
* Features a header (for a linear buffer containing RPC headers
* and the data payload for short messages), and then an array of
@@ -222,8 +217,10 @@ struct xdr_stream {
/*
* These are the xdr_stream style generic XDR encode and decode functions.
*/
-typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
-typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ const void *obj);
+typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *obj);
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 80d07816def0..1d4dba490fb6 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -47,6 +47,9 @@ extern int proc_douintvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec_minmax(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
@@ -117,7 +120,7 @@ struct ctl_table
struct ctl_table_poll *poll;
void *extra1;
void *extra2;
-};
+} __randomize_layout;
struct ctl_node {
struct rb_node node;
@@ -143,7 +146,7 @@ struct ctl_table_header
struct ctl_table_set *set;
struct ctl_dir *parent;
struct ctl_node *node;
- struct list_head inodes; /* head for proc_inode->sysctl_inodes */
+ struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
};
struct ctl_dir {
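
proc_douintvec_minmax() slots into a ctl_table like the existing minmax handlers, with extra1/extra2 pointing at unsigned int bounds. A hedged sketch with made-up names:

/* Illustrative only: clamp a sysctl-exposed unsigned int to [0, 100]. */
static unsigned int demo_val;
static unsigned int demo_min;
static unsigned int demo_max = 100;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_val",
		.data		= &demo_val,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &demo_min,
		.extra2		= &demo_max,
	},
	{ }
};
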
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 69464c0d8068..79c30daf46a9 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -332,7 +332,7 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
-};
+} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
struct tty_file_private {
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index b742b5e47cc2..00b2213f6a35 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -291,7 +291,7 @@ struct tty_operations {
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
const struct file_operations *proc_fops;
-};
+} __randomize_layout;
struct tty_driver {
int magic; /* magic number for this structure */
@@ -325,7 +325,7 @@ struct tty_driver {
const struct tty_operations *ops;
struct list_head tty_drivers;
-};
+} __randomize_layout;
extern struct list_head tty_drivers;
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 021f7a88f52c..1a59699cf82a 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -83,6 +83,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
+#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 32354b4b4b2b..b3575ce29148 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -66,7 +66,7 @@ struct user_namespace {
#endif
struct ucounts *ucounts;
int ucount_max[UCOUNT_COUNTS];
-};
+} __randomize_layout;
struct ucounts {
struct hlist_node node;
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 60f0bb83b313..da826ed059cf 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -26,7 +26,7 @@ struct uts_namespace {
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct ns_common ns;
-};
+} __randomize_layout;
extern struct uts_namespace init_uts_ns;
#ifdef CONFIG_UTS_NS
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index f57076b958b7..586809abb273 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -97,6 +97,8 @@ extern void vfio_unregister_iommu_driver(
*/
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index b582339ccef5..7af9d769b97d 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -157,6 +157,18 @@ struct p9_client {
enum p9_trans_status status;
void *trans;
+ union {
+ struct {
+ int rfd;
+ int wfd;
+ } fd;
+ struct {
+ u16 port;
+ bool privport;
+ } tcp;
+ } trans_opts;
+
struct p9_idpool *fidpool;
struct list_head fidlist;
@@ -213,6 +225,7 @@ struct p9_dirent {
struct iov_iter;
+int p9_show_client_options(struct seq_file *m, struct p9_client *clnt);
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
const char *name);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 5122b5e40f78..1625fb842ac4 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -62,6 +62,7 @@ struct p9_trans_module {
int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
struct iov_iter *, struct iov_iter *, int , int, int);
+ int (*show_options)(struct seq_file *, struct p9_client *);
};
void v9fs_register_trans(struct p9_trans_module *m);
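
A transport module backing the new hook might implement it roughly as follows; the callback body is illustrative and assumes the option names mirror the mount options, with 564 as the presumed default 9p port:

/* Illustrative only: a TCP transport's ->show_options callback. */
static int p9_tcp_show_options(struct seq_file *m, struct p9_client *clnt)
{
	if (clnt->trans_opts.tcp.port != 564)
		seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
	if (clnt->trans_opts.tcp.privport)
		seq_puts(m, ",privport");
	return 0;
}
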
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 3b3194b2fc65..afb37f835449 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -37,7 +37,7 @@ struct unix_skb_parms {
u32 secid; /* Security ID */
#endif
u32 consumed;
-};
+} __randomize_layout;
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index afc39e3a3f7c..9816df225af3 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -156,7 +156,7 @@ struct neighbour {
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
-};
+} __randomize_layout;
struct neigh_ops {
int family;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 31a2b51bef2c..1c401bd4c2e0 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -148,7 +148,7 @@ struct net {
#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
-};
+} __randomize_layout;
#include <linux/seq_file_net.h>
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 01709172b3d3..ef8e6c3a80a6 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,8 +98,8 @@
* nla_put_u8(skb, type, value) add u8 attribute to skb
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
- * nla_put_u64_64bits(skb, type,
- * value, padattr) add u64 attribute to skb
+ * nla_put_u64_64bit(skb, type,
+ * value, padattr) add u64 attribute to skb
* nla_put_s8(skb, type, value) add s8 attribute to skb
* nla_put_s16(skb, type, value) add s16 attribute to skb
* nla_put_s32(skb, type, value) add s32 attribute to skb
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a9519a06a23b..980807d7506f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
+ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <\
+ (void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
pos.v += SCTP_PAD4(ntohs(pos.p->length)))
@@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(struct sctp_chunkhdr));\
+ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <\
+ (void *)chunk_hdr + end) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
diff --git a/include/net/sock.h b/include/net/sock.h
index f69c8c2782df..7c0632c7e870 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1128,7 +1128,7 @@ struct proto {
atomic_t socks;
#endif
int (*diag_destroy)(struct sock *sk, int err);
-};
+} __randomize_layout;
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 356953d3dbd1..b5732432bb29 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1056,7 +1056,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
- IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
+ /* FREE = 1 << 7, */
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
/* reserve bits 26-31 for low level drivers' internal use */
@@ -2948,6 +2948,22 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr);
/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to user's input/output buffer information
+ *
+ * It returns 0 on success and an appropriate error code on error.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct ib_udata *udata);
+
+/**
* ib_modify_qp - Modifies the attributes for the specified QP and then
* transitions the QP to the given state.
* @qp: The QP to modify.
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4878aaf7bdff..55af69271053 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -229,8 +229,7 @@ struct rvt_driver_provided {
* ERR_PTR(err). The driver is free to return NULL or a valid
* pointer.
*/
- void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
+ void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
* Free the driver's private qp structure.
@@ -319,7 +318,7 @@ struct rvt_driver_provided {
/* Let the driver pick the next queue pair number*/
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num, gfp_t gfp);
+ enum ib_qp_type type, u8 port_num);
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 8260700d662b..8c285d9a06d8 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -158,6 +158,7 @@
#define READ_32 0x09
#define VERIFY_32 0x0a
#define WRITE_32 0x0b
+#define WRITE_VERIFY_32 0x0c
#define WRITE_SAME_32 0x0d
#define ATA_32 0x1ff0
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 5f17fb770477..0ca1fb08805b 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -66,6 +66,14 @@ struct sock;
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with the optional-to-respond state bit
+ * set, as a workaround for non-RFC-compliant initiators that neither propose
+ * nor respond to specific keys required for login to complete.
+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -560,7 +568,6 @@ struct iscsi_conn {
#define LOGIN_FLAGS_INITIAL_PDU 8
unsigned long login_flags;
struct delayed_work login_work;
- struct delayed_work login_cleanup_work;
struct iscsi_login *login;
struct timer_list nopin_timer;
struct timer_list nopin_response_timer;
@@ -769,6 +776,7 @@ struct iscsi_tpg_attrib {
u8 t10_pi;
u32 fabric_prot_type;
u32 tpg_enabled_sendtargets;
+ u32 login_keys_workaround;
struct iscsi_portal_group *tpg;
};
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index e475531565fd..e150e391878b 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -2,6 +2,7 @@
#define TARGET_CORE_BACKEND_H
#include <linux/types.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#define TRANSPORT_FLAG_PASSTHROUGH 0x1
@@ -29,16 +30,13 @@ struct target_backend_ops {
struct se_device *(*alloc_device)(struct se_hba *, const char *);
int (*configure_device)(struct se_device *);
+ void (*destroy_device)(struct se_device *);
void (*free_device)(struct se_device *device);
ssize_t (*set_configfs_dev_params)(struct se_device *,
const char *, ssize_t);
ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
- void (*transport_complete)(struct se_cmd *cmd,
- struct scatterlist *,
- unsigned char *);
-
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
@@ -71,6 +69,8 @@ void target_backend_unregister(const struct target_backend_ops *);
void target_complete_cmd(struct se_cmd *, u8);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);
+void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
+
sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
@@ -104,9 +104,18 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
+struct se_device *target_find_device(int id, bool do_depend);
+
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q);
+
+/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
+static inline uint32_t get_unaligned_be24(const uint8_t *const p)
+{
+ return get_unaligned_be32(p - 1) & 0xffffffU;
+}
+
#endif /* TARGET_CORE_BACKEND_H */
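
The "p - 1" caveat exists because the helper loads four bytes starting one byte before p and masks off the top byte. In CDB parsing the preceding byte is always readable; an illustrative caller:

/* Illustrative only: decode a 24-bit big-endian field at cdb[2]. */
static u32 demo_be24_field(const u8 *cdb)
{
	/* cdb[1] exists and may be read, so get_unaligned_be24() is safe. */
	return get_unaligned_be24(&cdb[2]);
}
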
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 0c1dce2ac6f0..516764febeb7 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -188,7 +188,8 @@ enum target_sc_flags_table {
TARGET_SCF_BIDI_OP = 0x01,
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
- TARGET_SCF_USE_CPUID = 0x08,
+ TARGET_SCF_USE_CPUID = 0x08,
+ TARGET_SCF_LOOKUP_LUN_FROM_TAG = 0x10,
};
/* fabric independent task management function values */
@@ -218,7 +219,6 @@ enum tcm_tmrsp_table {
*/
typedef enum {
SCSI_INST_INDEX,
- SCSI_DEVICE_INDEX,
SCSI_AUTH_INTR_INDEX,
SCSI_INDEX_TYPE_MAX
} scsi_index_t;
@@ -701,8 +701,6 @@ struct scsi_port_stats {
struct se_lun {
u64 unpacked_lun;
-#define SE_LUN_LINK_MAGIC 0xffff7771
- u32 lun_link_magic;
bool lun_shutdown;
bool lun_access_ro;
u32 lun_index;
@@ -746,8 +744,6 @@ struct se_dev_stat_grps {
};
struct se_device {
-#define SE_DEV_LINK_MAGIC 0xfeeddeef
- u32 dev_link_magic;
/* RELATIVE TARGET PORT IDENTIFIER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@@ -800,7 +796,6 @@ struct se_device {
struct list_head delayed_cmd_list;
struct list_head state_list;
struct list_head qf_cmd_list;
- struct list_head g_dev_node;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
/* T10 Inquiry and VPD WWN Information */
@@ -819,8 +814,6 @@ struct se_device {
unsigned char udev_path[SE_UDEV_PATH_LEN];
/* Pointer to template of function pointers for transport */
const struct target_backend_ops *transport;
- /* Linked list for struct se_hba struct se_device list */
- struct list_head dev_list;
struct se_lun xcopy_lun;
/* Protection Information */
int prot_length;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index d7dd1427fe0d..33d2e3e5773c 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -160,6 +160,7 @@ int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);
+void target_show_cmd(const char *pfx, struct se_cmd *cmd);
int core_alua_check_nonop_delay(struct se_cmd *);
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 10e3663a75a6..8e50d01c645f 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -34,7 +34,7 @@
{(unsigned long)__GFP_FS, "__GFP_FS"}, \
{(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
- {(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \
+ {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
{(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
{(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
diff --git a/include/uapi/linux/kcmp.h b/include/uapi/linux/kcmp.h
index 84df14b37360..481e103da78e 100644
--- a/include/uapi/linux/kcmp.h
+++ b/include/uapi/linux/kcmp.h
@@ -1,6 +1,8 @@
#ifndef _UAPI_LINUX_KCMP_H
#define _UAPI_LINUX_KCMP_H
+#include <linux/types.h>
+
/* Comparison type */
enum kcmp_type {
KCMP_FILE,
@@ -10,8 +12,16 @@ enum kcmp_type {
KCMP_SIGHAND,
KCMP_IO,
KCMP_SYSVSEM,
+ KCMP_EPOLL_TFD,
KCMP_TYPES,
};
+/* Slot for KCMP_EPOLL_TFD */
+struct kcmp_epoll_slot {
+ __u32 efd; /* epoll file descriptor */
+ __u32 tfd; /* target file number */
+ __u32 toff; /* target offset within same numbered sequence */
+};
+
#endif /* _UAPI_LINUX_KCMP_H */
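
From userspace, the new KCMP_EPOLL_TFD type checks whether a process's file is the one stored in another process's epoll slot; per the kcmp() calling convention, idx2 carries a pointer to the slot. A hedged sketch:

/* Illustrative only: is pid1's 'tfd' the file at this slot of pid2's epoll? */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/kcmp.h>

static int epoll_has_tfd(pid_t pid1, pid_t pid2, int efd, int tfd)
{
	struct kcmp_epoll_slot slot = {
		.efd  = efd,	/* epoll fd in pid2 */
		.tfd  = tfd,	/* target fd number stored in that epoll */
		.toff = 0,	/* first match among same-numbered entries */
	};

	/* 0: same file; >0: ordering result (different); -1: error, errno set */
	return syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD, tfd,
		       (unsigned long)&slot);
}
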
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index c0b6dfec5f87..6cd63c18708a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -927,6 +927,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_CMMA_MIGRATION 145
#define KVM_CAP_PPC_FWNMI 146
#define KVM_CAP_PPC_SMT_POSSIBLE 147
+#define KVM_CAP_HYPERV_SYNIC2 148
+#define KVM_CAP_HYPERV_VP_INDEX 149
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1351,7 +1353,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_X86_SMM */
#define KVM_SMI _IO(KVMIO, 0xb7)
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
-#define KVM_S390_GET_CMMA_BITS _IOW(KVMIO, 0xb8, struct kvm_s390_cmma_log)
+#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index dd73b908b2f3..67eb90361692 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -23,7 +23,7 @@
struct semid_ds {
struct ipc_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ __kernel_time_t sem_ctime; /* create/last semctl() time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct sem_queue *sem_pending; /* pending operations to be processed */
struct sem_queue **sem_pending_last; /* last pending operation */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index af17b4154ef6..24a1c4ec2248 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -130,6 +130,11 @@ enum tcmu_genl_cmd {
TCMU_CMD_UNSPEC,
TCMU_CMD_ADDED_DEVICE,
TCMU_CMD_REMOVED_DEVICE,
+ TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_CMD_ADDED_DEVICE_DONE,
+ TCMU_CMD_REMOVED_DEVICE_DONE,
+ TCMU_CMD_RECONFIG_DEVICE_DONE,
+ TCMU_CMD_SET_FEATURES,
__TCMU_CMD_MAX,
};
#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
@@ -138,6 +143,13 @@ enum tcmu_genl_attr {
TCMU_ATTR_UNSPEC,
TCMU_ATTR_DEVICE,
TCMU_ATTR_MINOR,
+ TCMU_ATTR_PAD,
+ TCMU_ATTR_DEV_CFG,
+ TCMU_ATTR_DEV_SIZE,
+ TCMU_ATTR_WRITECACHE,
+ TCMU_ATTR_CMD_STATUS,
+ TCMU_ATTR_DEVICE_ID,
+ TCMU_ATTR_SUPP_KERN_CMD_REPLY,
__TCMU_ATTR_MAX,
};
#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
diff --git a/init/main.c b/init/main.c
index df58a416dd1d..052481fbe363 100644
--- a/init/main.c
+++ b/init/main.c
@@ -518,6 +518,7 @@ asmlinkage __visible void __init start_kernel(void)
/*
* Set up the initial canary ASAP:
*/
+ add_latent_entropy();
boot_init_stack_canary();
cgroup_init_early();
diff --git a/ipc/msg.c b/ipc/msg.c
index 104926dc72be..5b25e0755656 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -97,11 +97,11 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
static void msg_rcu_free(struct rcu_head *head)
{
- struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
- struct msg_queue *msq = ipc_rcu_to_struct(p);
+ struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
+ struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
security_msg_queue_free(msq);
- ipc_rcu_free(head);
+ kvfree(msq);
}
/**
@@ -114,12 +114,12 @@ static void msg_rcu_free(struct rcu_head *head)
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
- int id, retval;
+ int retval;
key_t key = params->key;
int msgflg = params->flg;
- msq = ipc_rcu_alloc(sizeof(*msq));
- if (!msq)
+ msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+ if (unlikely(!msq))
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
@@ -128,7 +128,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
- ipc_rcu_putref(msq, ipc_rcu_free);
+ kvfree(msq);
return retval;
}
@@ -142,10 +142,10 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
INIT_LIST_HEAD(&msq->q_senders);
/* ipc_addid() locks msq upon success. */
- id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
- if (id < 0) {
- ipc_rcu_putref(msq, msg_rcu_free);
- return id;
+ retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+ if (retval < 0) {
+ call_rcu(&msq->q_perm.rcu, msg_rcu_free);
+ return retval;
}
ipc_unlock_object(&msq->q_perm);
@@ -249,7 +249,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
free_msg(msg);
}
atomic_sub(msq->q_cbytes, &ns->msg_bytes);
- ipc_rcu_putref(msq, msg_rcu_free);
+ ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
}
/*
@@ -688,7 +688,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
/* enqueue the sender and prepare to block */
ss_add(msq, &s, msgsz);
- if (!ipc_rcu_getref(msq)) {
+ if (!ipc_rcu_getref(&msq->q_perm)) {
err = -EIDRM;
goto out_unlock0;
}
@@ -700,7 +700,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
rcu_read_lock();
ipc_lock_object(&msq->q_perm);
- ipc_rcu_putref(msq, msg_rcu_free);
+ ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
/* raced with RMID? */
if (!ipc_valid_object(&msq->q_perm)) {
err = -EIDRM;
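
The msg.c conversion above illustrates the new object lifecycle: the object embeds kern_ipc_perm (whose rcu head and refcount now drive teardown), failures before ipc_addid() free with plain kvfree(), and failures after it go through call_rcu() because the object may already be reachable by RCU readers. A condensed sketch of the pattern, using a hypothetical "foo" object with placeholder ids/limit, not any in-tree code:

struct foo {
	struct kern_ipc_perm perm;	/* rcu head + refcount live here */
	int payload;
};

static void foo_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);

	kvfree(container_of(p, struct foo, perm));
}

static int foo_new(struct ipc_ids *ids, int limit)
{
	struct foo *f = kvmalloc(sizeof(*f), GFP_KERNEL);
	int err;

	if (unlikely(!f))
		return -ENOMEM;

	/* a failure here (before ipc_addid()): nothing published, plain kvfree() */

	err = ipc_addid(ids, &f->perm, limit);
	if (err < 0) {
		/* refcount is live and the object may be visible: go through RCU */
		call_rcu(&f->perm.rcu, foo_rcu_free);
		return err;
	}

	/* ipc_addid() returns with the object locked and rcu_read_lock() held */
	ipc_unlock_object(&f->perm);
	rcu_read_unlock();
	return f->perm.id;
}
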
diff --git a/ipc/sem.c b/ipc/sem.c
index 947dc2348271..9e70cd7a17da 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -87,24 +87,6 @@
#include <linux/uaccess.h>
#include "util.h"
-/* One semaphore structure for each semaphore in the system. */
-struct sem {
- int semval; /* current value */
- /*
- * PID of the process that last modified the semaphore. For
- * Linux, specifically these are:
- * - semop
- * - semctl, via SETVAL and SETALL.
- * - at task exit when performing undo adjustments (see exit_sem).
- */
- int sempid;
- spinlock_t lock; /* spinlock for fine-grained semtimedop */
- struct list_head pending_alter; /* pending single-sop operations */
- /* that alter the semaphore */
- struct list_head pending_const; /* pending single-sop operations */
- /* that do not alter the semaphore*/
- time_t sem_otime; /* candidate for sem_otime */
-} ____cacheline_aligned_in_smp;
/* One queue for each sleeping process in the system. */
struct sem_queue {
@@ -175,7 +157,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
* sem_array.sem_undo
*
* b) global or semaphore sem_lock() for read/write:
- * sem_array.sem_base[i].pending_{const,alter}:
+ * sem_array.sems[i].pending_{const,alter}:
*
* c) special:
* sem_undo_list.list_proc:
@@ -250,7 +232,7 @@ static void unmerge_queues(struct sem_array *sma)
*/
list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
struct sem *curr;
- curr = &sma->sem_base[q->sops[0].sem_num];
+ curr = &sma->sems[q->sops[0].sem_num];
list_add_tail(&q->list, &curr->pending_alter);
}
@@ -270,7 +252,7 @@ static void merge_queues(struct sem_array *sma)
{
int i;
for (i = 0; i < sma->sem_nsems; i++) {
- struct sem *sem = sma->sem_base + i;
+ struct sem *sem = &sma->sems[i];
list_splice_init(&sem->pending_alter, &sma->pending_alter);
}
@@ -278,11 +260,11 @@ static void merge_queues(struct sem_array *sma)
static void sem_rcu_free(struct rcu_head *head)
{
- struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
- struct sem_array *sma = ipc_rcu_to_struct(p);
+ struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
+ struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
security_sem_free(sma);
- ipc_rcu_free(head);
+ kvfree(sma);
}
/*
@@ -306,7 +288,7 @@ static void complexmode_enter(struct sem_array *sma)
sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
for (i = 0; i < sma->sem_nsems; i++) {
- sem = sma->sem_base + i;
+ sem = &sma->sems[i];
spin_lock(&sem->lock);
spin_unlock(&sem->lock);
}
@@ -366,7 +348,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
*
* Both facts are tracked by use_global_mode.
*/
- sem = sma->sem_base + sops->sem_num;
+ sem = &sma->sems[sops->sem_num];
/*
* Initial check for use_global_lock. Just an optimization,
@@ -421,7 +403,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
complexmode_tryleave(sma);
ipc_unlock_object(&sma->sem_perm);
} else {
- struct sem *sem = sma->sem_base + locknum;
+ struct sem *sem = &sma->sems[locknum];
spin_unlock(&sem->lock);
}
}
@@ -456,7 +438,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -464,6 +446,24 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
+static struct sem_array *sem_alloc(size_t nsems)
+{
+ struct sem_array *sma;
+ size_t size;
+
+ if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
+ return NULL;
+
+ size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
+ sma = kvmalloc(size, GFP_KERNEL);
+ if (unlikely(!sma))
+ return NULL;
+
+ memset(sma, 0, size);
+
+ return sma;
+}
+
/**
* newary - Create a new semaphore set
* @ns: namespace
@@ -473,10 +473,8 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
*/
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
- int id;
int retval;
struct sem_array *sma;
- int size;
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
@@ -487,29 +485,24 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC;
- size = sizeof(*sma) + nsems * sizeof(struct sem);
- sma = ipc_rcu_alloc(size);
+ sma = sem_alloc(nsems);
if (!sma)
return -ENOMEM;
- memset(sma, 0, size);
-
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
if (retval) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ kvfree(sma);
return retval;
}
- sma->sem_base = (struct sem *) &sma[1];
-
for (i = 0; i < nsems; i++) {
- INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
- INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
- spin_lock_init(&sma->sem_base[i].lock);
+ INIT_LIST_HEAD(&sma->sems[i].pending_alter);
+ INIT_LIST_HEAD(&sma->sems[i].pending_const);
+ spin_lock_init(&sma->sems[i].lock);
}
sma->complex_count = 0;
@@ -520,10 +513,10 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
- if (id < 0) {
- ipc_rcu_putref(sma, sem_rcu_free);
- return id;
+ retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+ if (retval < 0) {
+ call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
+ return retval;
}
ns->used_sems += nsems;
@@ -612,7 +605,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
- curr = sma->sem_base + sop->sem_num;
+ curr = &sma->sems[sop->sem_num];
sem_op = sop->sem_op;
result = curr->semval;
@@ -639,7 +632,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
sop--;
pid = q->pid;
while (sop >= sops) {
- sma->sem_base[sop->sem_num].sempid = pid;
+ sma->sems[sop->sem_num].sempid = pid;
sop--;
}
@@ -661,7 +654,7 @@ undo:
sop--;
while (sop >= sops) {
sem_op = sop->sem_op;
- sma->sem_base[sop->sem_num].semval -= sem_op;
+ sma->sems[sop->sem_num].semval -= sem_op;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] += sem_op;
sop--;
@@ -692,7 +685,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
* until the operations can go through.
*/
for (sop = sops; sop < sops + nsops; sop++) {
- curr = sma->sem_base + sop->sem_num;
+ curr = &sma->sems[sop->sem_num];
sem_op = sop->sem_op;
result = curr->semval;
@@ -716,7 +709,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
}
for (sop = sops; sop < sops + nsops; sop++) {
- curr = sma->sem_base + sop->sem_num;
+ curr = &sma->sems[sop->sem_num];
sem_op = sop->sem_op;
result = curr->semval;
@@ -815,7 +808,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
if (semnum == -1)
pending_list = &sma->pending_const;
else
- pending_list = &sma->sem_base[semnum].pending_const;
+ pending_list = &sma->sems[semnum].pending_const;
list_for_each_entry_safe(q, tmp, pending_list, list) {
int error = perform_atomic_semop(sma, q);
@@ -856,7 +849,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
for (i = 0; i < nsops; i++) {
int num = sops[i].sem_num;
- if (sma->sem_base[num].semval == 0) {
+ if (sma->sems[num].semval == 0) {
got_zero = 1;
semop_completed |= wake_const_ops(sma, num, wake_q);
}
@@ -867,7 +860,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
* Assume all were changed.
*/
for (i = 0; i < sma->sem_nsems; i++) {
- if (sma->sem_base[i].semval == 0) {
+ if (sma->sems[i].semval == 0) {
got_zero = 1;
semop_completed |= wake_const_ops(sma, i, wake_q);
}
@@ -909,7 +902,7 @@ static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *w
if (semnum == -1)
pending_list = &sma->pending_alter;
else
- pending_list = &sma->sem_base[semnum].pending_alter;
+ pending_list = &sma->sems[semnum].pending_alter;
again:
list_for_each_entry_safe(q, tmp, pending_list, list) {
@@ -922,7 +915,7 @@ again:
* be in the per semaphore pending queue, and decrements
* cannot be successful if the value is already 0.
*/
- if (semnum != -1 && sma->sem_base[semnum].semval == 0)
+ if (semnum != -1 && sma->sems[semnum].semval == 0)
break;
error = perform_atomic_semop(sma, q);
@@ -959,9 +952,9 @@ again:
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
if (sops == NULL) {
- sma->sem_base[0].sem_otime = get_seconds();
+ sma->sems[0].sem_otime = get_seconds();
} else {
- sma->sem_base[sops[0].sem_num].sem_otime =
+ sma->sems[sops[0].sem_num].sem_otime =
get_seconds();
}
}
@@ -1067,9 +1060,9 @@ static int count_semcnt(struct sem_array *sma, ushort semnum,
semcnt = 0;
/* First: check the simple operations. They are easy to evaluate */
if (count_zero)
- l = &sma->sem_base[semnum].pending_const;
+ l = &sma->sems[semnum].pending_const;
else
- l = &sma->sem_base[semnum].pending_alter;
+ l = &sma->sems[semnum].pending_alter;
list_for_each_entry(q, l, list) {
/* all task on a per-semaphore list sleep on exactly
@@ -1124,7 +1117,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
for (i = 0; i < sma->sem_nsems; i++) {
- struct sem *sem = sma->sem_base + i;
+ struct sem *sem = &sma->sems[i];
list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
@@ -1142,7 +1135,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
wake_up_q(&wake_q);
ns->used_sems -= sma->sem_nsems;
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1174,9 +1167,9 @@ static time_t get_semotime(struct sem_array *sma)
int i;
time_t res;
- res = sma->sem_base[0].sem_otime;
+ res = sma->sems[0].sem_otime;
for (i = 1; i < sma->sem_nsems; i++) {
- time_t to = sma->sem_base[i].sem_otime;
+ time_t to = sma->sems[i].sem_otime;
if (to > res)
res = to;
@@ -1325,7 +1318,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
return -EIDRM;
}
- curr = &sma->sem_base[semnum];
+ curr = &sma->sems[semnum];
ipc_assert_locked_object(&sma->sem_perm);
list_for_each_entry(un, &sma->list_id, list_id)
@@ -1382,15 +1375,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_unlock;
}
if (nsems > SEMMSL_FAST) {
- if (!ipc_rcu_getref(sma)) {
+ if (!ipc_rcu_getref(&sma->sem_perm)) {
err = -EIDRM;
goto out_unlock;
}
sem_unlock(sma, -1);
rcu_read_unlock();
- sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ sem_io = kvmalloc_array(nsems, sizeof(ushort),
+ GFP_KERNEL);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return -ENOMEM;
}
@@ -1402,7 +1396,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
}
for (i = 0; i < sma->sem_nsems; i++)
- sem_io[i] = sma->sem_base[i].semval;
+ sem_io[i] = sma->sems[i].semval;
sem_unlock(sma, -1);
rcu_read_unlock();
err = 0;
@@ -1415,29 +1409,30 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- if (!ipc_rcu_getref(sma)) {
+ if (!ipc_rcu_getref(&sma->sem_perm)) {
err = -EIDRM;
goto out_rcu_wakeup;
}
rcu_read_unlock();
if (nsems > SEMMSL_FAST) {
- sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ sem_io = kvmalloc_array(nsems, sizeof(ushort),
+ GFP_KERNEL);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return -ENOMEM;
}
}
if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
err = -ERANGE;
goto out_free;
}
@@ -1450,8 +1445,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
for (i = 0; i < nsems; i++) {
- sma->sem_base[i].semval = sem_io[i];
- sma->sem_base[i].sempid = task_tgid_vnr(current);
+ sma->sems[i].semval = sem_io[i];
+ sma->sems[i].sempid = task_tgid_vnr(current);
}
ipc_assert_locked_object(&sma->sem_perm);
@@ -1476,7 +1471,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = -EIDRM;
goto out_unlock;
}
- curr = &sma->sem_base[semnum];
+ curr = &sma->sems[semnum];
switch (cmd) {
case GETVAL:
@@ -1500,7 +1495,7 @@ out_rcu_wakeup:
wake_up_q(&wake_q);
out_free:
if (sem_io != fast_sem_io)
- ipc_free(sem_io);
+ kvfree(sem_io);
return err;
}
@@ -1719,7 +1714,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- if (!ipc_rcu_getref(sma)) {
+ if (!ipc_rcu_getref(&sma->sem_perm)) {
rcu_read_unlock();
un = ERR_PTR(-EIDRM);
goto out;
@@ -1729,7 +1724,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
- ipc_rcu_putref(sma, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
}
@@ -1932,7 +1927,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
*/
if (nsops == 1) {
struct sem *curr;
- curr = &sma->sem_base[sops->sem_num];
+ curr = &sma->sems[sops->sem_num];
if (alter) {
if (sma->complex_count) {
@@ -2146,7 +2141,7 @@ void exit_sem(struct task_struct *tsk)
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
- struct sem *semaphore = &sma->sem_base[i];
+ struct sem *semaphore = &sma->sems[i];
if (un->semadj[i]) {
semaphore->semval += un->semadj[i];
/*
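
Throughout this file the indirection through sem_base goes away: struct sem_array now ends in a flexible array member sems[], and sem_alloc() rejects nsems values whose size computation would overflow. A reduced sketch of that overflow guard, with a hypothetical struct (later kernels package the same check as struct_size()):

struct demo {
	long hdr;
	int elems[];			/* flexible array member */
};

static struct demo *demo_alloc(size_t n)
{
	/* reject n where n * sizeof(int) would overflow the total size */
	if (n > (INT_MAX - sizeof(struct demo)) / sizeof(int))
		return NULL;

	return kvzalloc(sizeof(struct demo) + n * sizeof(int), GFP_KERNEL);
}
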
diff --git a/ipc/shm.c b/ipc/shm.c
index f45c7959b264..28a444861a8f 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -174,11 +174,12 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
static void shm_rcu_free(struct rcu_head *head)
{
- struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
- struct shmid_kernel *shp = ipc_rcu_to_struct(p);
-
+ struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
+ rcu);
+ struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
+ shm_perm);
security_shm_free(shp);
- ipc_rcu_free(head);
+ kvfree(shp);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
@@ -241,7 +242,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
user_shm_unlock(i_size_read(file_inode(shm_file)),
shp->mlock_user);
fput(shm_file);
- ipc_rcu_putref(shp, shm_rcu_free);
+ ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}
/*
@@ -529,7 +530,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file *file;
char name[13];
- int id;
vm_flags_t acctflag = 0;
if (size < SHMMIN || size > ns->shm_ctlmax)
@@ -542,8 +542,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
- shp = ipc_rcu_alloc(sizeof(*shp));
- if (!shp)
+ shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
+ if (unlikely(!shp))
return -ENOMEM;
shp->shm_perm.key = key;
@@ -553,7 +553,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
if (error) {
- ipc_rcu_putref(shp, ipc_rcu_free);
+ kvfree(shp);
return error;
}
@@ -598,11 +598,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_file = file;
shp->shm_creator = current;
- id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
- if (id < 0) {
- error = id;
+ error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+ if (error < 0)
goto no_id;
- }
list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
@@ -624,7 +622,7 @@ no_id:
user_shm_unlock(size, shp->mlock_user);
fput(file);
no_file:
- ipc_rcu_putref(shp, shm_rcu_free);
+ call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
return error;
}
diff --git a/ipc/util.c b/ipc/util.c
index caec7b1bfaa3..1a2cb02467ab 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -232,6 +232,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
idr_preload(GFP_KERNEL);
+ atomic_set(&new->refcount, 1);
spin_lock_init(&new->lock);
new->deleted = false;
rcu_read_lock();
@@ -394,70 +395,18 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
ipcp->deleted = true;
}
-/**
- * ipc_alloc - allocate ipc space
- * @size: size desired
- *
- * Allocate memory from the appropriate pools and return a pointer to it.
- * NULL is returned if the allocation fails
- */
-void *ipc_alloc(int size)
-{
- return kvmalloc(size, GFP_KERNEL);
-}
-
-/**
- * ipc_free - free ipc space
- * @ptr: pointer returned by ipc_alloc
- *
- * Free a block created with ipc_alloc().
- */
-void ipc_free(void *ptr)
+int ipc_rcu_getref(struct kern_ipc_perm *ptr)
{
- kvfree(ptr);
+ return atomic_inc_not_zero(&ptr->refcount);
}
-/**
- * ipc_rcu_alloc - allocate ipc and rcu space
- * @size: size desired
- *
- * Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object or NULL upon failure.
- */
-void *ipc_rcu_alloc(int size)
+void ipc_rcu_putref(struct kern_ipc_perm *ptr,
+ void (*func)(struct rcu_head *head))
{
- /*
- * We prepend the allocation with the rcu struct
- */
- struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size);
- if (unlikely(!out))
- return NULL;
- atomic_set(&out->refcount, 1);
- return out + 1;
-}
-
-int ipc_rcu_getref(void *ptr)
-{
- struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
-
- return atomic_inc_not_zero(&p->refcount);
-}
-
-void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
-{
- struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
-
- if (!atomic_dec_and_test(&p->refcount))
+ if (!atomic_dec_and_test(&ptr->refcount))
return;
- call_rcu(&p->rcu, func);
-}
-
-void ipc_rcu_free(struct rcu_head *head)
-{
- struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-
- kvfree(p);
+ call_rcu(&ptr->rcu, func);
}
/**
diff --git a/ipc/util.h b/ipc/util.h
index 60ddccca464d..c692010e6f0a 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,13 +47,6 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
static inline void shm_exit_ns(struct ipc_namespace *ns) { }
#endif
-struct ipc_rcu {
- struct rcu_head rcu;
- atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
-#define ipc_rcu_to_struct(p) ((void *)(p+1))
-
/*
* Structure that holds the parameters needed by the ipc operations
* (see after)
@@ -114,22 +107,18 @@ void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);
/* must be called with ipcp locked */
int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
-/* for rare, potentially huge allocations.
- * both function can sleep
- */
-void *ipc_alloc(int size);
-void ipc_free(void *ptr);
-
/*
* For allocation that need to be freed by RCU.
* Objects are reference counted, they start with reference count 1.
 * getref increases the refcount; the putref call that reduces the refcount
* to 0 schedules the rcu destruction. Caller must guarantee locking.
+ *
+ * refcount is initialized by ipc_addid(); before that point, call_rcu()
+ * must be used to free the object.
*/
-void *ipc_rcu_alloc(int size);
-int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
-void ipc_rcu_free(struct rcu_head *head);
+int ipc_rcu_getref(struct kern_ipc_perm *ptr);
+void ipc_rcu_putref(struct kern_ipc_perm *ptr,
+ void (*func)(struct rcu_head *head));
struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);
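
The reason this API survives is the drop-lock-and-sleep dance used by callers such as do_msgsnd() and semctl_main() above: take a reference, drop the object lock to sleep or allocate, then relock and drop the reference. A hedged sketch of that pattern, reusing the hypothetical foo_rcu_free() callback from the earlier sketch:

static void foo_rcu_free(struct rcu_head *head);	/* as sketched above */

static int foo_sleep_then_relock(struct kern_ipc_perm *perm)
{
	/* called with rcu_read_lock() held and the object locked */
	if (!ipc_rcu_getref(perm))
		return -EIDRM;		/* object is already being freed */

	ipc_unlock_object(perm);
	rcu_read_unlock();

	/* ... sleep, or allocate with GFP_KERNEL ... */

	rcu_read_lock();
	ipc_lock_object(perm);
	ipc_rcu_putref(perm, foo_rcu_free);

	/* raced with RMID while we slept? */
	if (!ipc_valid_object(perm))
		return -EIDRM;
	return 0;
}
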
diff --git a/kernel/Makefile b/kernel/Makefile
index 72aa080f91f0..4cb8e8b23c6e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -82,7 +82,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
-obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 833267bbd80b..6dd556931739 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -641,6 +641,7 @@ static int auditd_send_unicast_skb(struct sk_buff *skb)
ac = rcu_dereference(auditd_conn);
if (!ac) {
rcu_read_unlock();
+ kfree_skb(skb);
rc = -ECONNREFUSED;
goto err;
}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 9bbd33497d3d..e833ed914358 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -377,10 +377,22 @@ static void bpf_evict_inode(struct inode *inode)
bpf_any_put(inode->i_private, type);
}
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int bpf_show_options(struct seq_file *m, struct dentry *root)
+{
+ umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
+
+ if (mode != S_IRWXUGO)
+ seq_printf(m, ",mode=%o", mode);
+ return 0;
+}
+
static const struct super_operations bpf_super_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
- .show_options = generic_show_options,
+ .show_options = bpf_show_options,
.evict_inode = bpf_evict_inode,
};
@@ -434,8 +446,6 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode;
int ret;
- save_mount_options(sb, data);
-
ret = bpf_parse_options(data, &opts);
if (ret)
return ret;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ebe9b38ff522..db6a289ebf0b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
{
regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+ regs[regno].value_from_signed = false;
regs[regno].min_align = 0;
}
@@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
return -EACCES;
}
-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+static bool __is_pointer_value(bool allow_ptr_leaks,
+ const struct bpf_reg_state *reg)
{
- if (env->allow_ptr_leaks)
+ if (allow_ptr_leaks)
return false;
- switch (env->cur_state.regs[regno].type) {
+ switch (reg->type) {
case UNKNOWN_VALUE:
case CONST_IMM:
return false;
@@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
}
}
+static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+{
+ return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+}
+
static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
int off, int size, bool strict)
{
@@ -1844,10 +1851,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
dst_align = dst_reg->min_align;
/* We don't know anything about what was done to this register, mark it
- * as unknown.
+ * as unknown. Also, if both derived bounds came from signed/unsigned
+ * mixed compares and one side is unbounded, we cannot really do anything
+ * with them as boundaries cannot be trusted. Thus, arithmetic of two
+ * regs of such kind will get invalidated bounds on the dst side.
*/
- if (min_val == BPF_REGISTER_MIN_RANGE &&
- max_val == BPF_REGISTER_MAX_RANGE) {
+ if ((min_val == BPF_REGISTER_MIN_RANGE &&
+ max_val == BPF_REGISTER_MAX_RANGE) ||
+ (BPF_SRC(insn->code) == BPF_X &&
+ ((min_val != BPF_REGISTER_MIN_RANGE &&
+ max_val == BPF_REGISTER_MAX_RANGE) ||
+ (min_val == BPF_REGISTER_MIN_RANGE &&
+ max_val != BPF_REGISTER_MAX_RANGE) ||
+ (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
+ dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
+ (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
+ dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
+ regs[insn->dst_reg].value_from_signed !=
+ regs[insn->src_reg].value_from_signed)) {
reset_reg_range_values(regs, insn->dst_reg);
return;
}
@@ -2035,6 +2056,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
regs[insn->dst_reg].max_value = insn->imm;
regs[insn->dst_reg].min_value = insn->imm;
regs[insn->dst_reg].min_align = calc_align(insn->imm);
+ regs[insn->dst_reg].value_from_signed = false;
}
} else if (opcode > BPF_END) {
@@ -2210,40 +2232,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
+ bool value_from_signed = true;
+ bool is_range = true;
+
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
true_reg->max_value = true_reg->min_value = val;
+ is_range = false;
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
false_reg->max_value = false_reg->min_value = val;
+ is_range = false;
break;
case BPF_JGT:
- /* Unsigned comparison, the minimum value is 0. */
- false_reg->min_value = 0;
+ value_from_signed = false;
/* fallthrough */
case BPF_JSGT:
+ if (true_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(true_reg, 0);
+ if (false_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(false_reg, 0);
+ if (opcode == BPF_JGT) {
+ /* Unsigned comparison, the minimum value is 0. */
+ false_reg->min_value = 0;
+ }
/* If this is false then we know the maximum val is val,
* otherwise we know the min val is val+1.
*/
false_reg->max_value = val;
+ false_reg->value_from_signed = value_from_signed;
true_reg->min_value = val + 1;
+ true_reg->value_from_signed = value_from_signed;
break;
case BPF_JGE:
- /* Unsigned comparison, the minimum value is 0. */
- false_reg->min_value = 0;
+ value_from_signed = false;
/* fallthrough */
case BPF_JSGE:
+ if (true_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(true_reg, 0);
+ if (false_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(false_reg, 0);
+ if (opcode == BPF_JGE) {
+ /* Unsigned comparison, the minimum value is 0. */
+ false_reg->min_value = 0;
+ }
/* If this is false then we know the maximum value is val - 1,
 	 * otherwise we know the minimum value is val.
*/
false_reg->max_value = val - 1;
+ false_reg->value_from_signed = value_from_signed;
true_reg->min_value = val;
+ true_reg->value_from_signed = value_from_signed;
break;
default:
break;
@@ -2251,6 +2296,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
check_reg_overflow(false_reg);
check_reg_overflow(true_reg);
+ if (is_range) {
+ if (__is_pointer_value(false, false_reg))
+ reset_reg_range_values(false_reg, 0);
+ if (__is_pointer_value(false, true_reg))
+ reset_reg_range_values(true_reg, 0);
+ }
}
/* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
@@ -2260,41 +2311,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
+ bool value_from_signed = true;
+ bool is_range = true;
+
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
true_reg->max_value = true_reg->min_value = val;
+ is_range = false;
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
false_reg->max_value = false_reg->min_value = val;
+ is_range = false;
break;
case BPF_JGT:
- /* Unsigned comparison, the minimum value is 0. */
- true_reg->min_value = 0;
+ value_from_signed = false;
/* fallthrough */
case BPF_JSGT:
+ if (true_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(true_reg, 0);
+ if (false_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(false_reg, 0);
+ if (opcode == BPF_JGT) {
+ /* Unsigned comparison, the minimum value is 0. */
+ true_reg->min_value = 0;
+ }
/*
+	 * If this is false, then the val is <= the register; if it is
+	 * true, the register is < the val.
*/
false_reg->min_value = val;
+ false_reg->value_from_signed = value_from_signed;
true_reg->max_value = val - 1;
+ true_reg->value_from_signed = value_from_signed;
break;
case BPF_JGE:
- /* Unsigned comparison, the minimum value is 0. */
- true_reg->min_value = 0;
+ value_from_signed = false;
/* fallthrough */
case BPF_JSGE:
+ if (true_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(true_reg, 0);
+ if (false_reg->value_from_signed != value_from_signed)
+ reset_reg_range_values(false_reg, 0);
+ if (opcode == BPF_JGE) {
+ /* Unsigned comparison, the minimum value is 0. */
+ true_reg->min_value = 0;
+ }
/* If this is false then constant < register, if it is true then
* the register < constant.
*/
false_reg->min_value = val + 1;
+ false_reg->value_from_signed = value_from_signed;
true_reg->max_value = val;
+ true_reg->value_from_signed = value_from_signed;
break;
default:
break;
@@ -2302,6 +2376,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
check_reg_overflow(false_reg);
check_reg_overflow(true_reg);
+ if (is_range) {
+ if (__is_pointer_value(false, false_reg))
+ reset_reg_range_values(false_reg, 0);
+ if (__is_pointer_value(false, true_reg))
+ reset_reg_range_values(true_reg, 0);
+ }
}
static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
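
The verifier change above concerns bounds learned from signed (BPF_JSGT/JSGE) versus unsigned (BPF_JGT/JGE) comparisons: mixing the two views of the same register could let a negative value pass as a small unsigned one. An illustrative restricted-C BPF fragment; get_len(), use_buffer() and struct ctx are placeholders, not real helpers:

struct ctx;				/* placeholder context type */
extern int get_len(struct ctx *ctx);	/* placeholder helpers */
extern int use_buffer(struct ctx *ctx, int len);

int bpf_prog(struct ctx *ctx)
{
	int len = get_len(ctx);		/* attacker-influenced value */

	/* Signed compare (BPF_JSGT): afterwards len is in [INT_MIN, 64],
	 * i.e. it may still be negative.
	 */
	if (len > 64)
		return 0;

	/* If 'len' is later consumed as an unsigned quantity, a negative
	 * value is a huge offset. The patch stops the verifier from
	 * combining signed-derived bounds with unsigned-derived ones;
	 * mixed, partially unbounded pairs get their range invalidated.
	 */
	return use_buffer(ctx, len);
}
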
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ab860453841d..eee033134262 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -279,7 +279,8 @@ static int bringup_wait_for_ap(unsigned int cpu)
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_completion(&st->done);
- BUG_ON(!cpu_online(cpu));
+	if (WARN_ON_ONCE(!cpu_online(cpu)))
+ return -ECANCELED;
/* Unpark the stopper thread and the hotplug thread of the target cpu */
stop_machine_unpark(cpu);
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index fcbd568f1e95..6db80fc0810b 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -14,10 +14,12 @@
#include <asm/sections.h>
/* vmcoreinfo stuff */
-static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
-u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-size_t vmcoreinfo_size;
-size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+static unsigned char *vmcoreinfo_data;
+static size_t vmcoreinfo_size;
+u32 *vmcoreinfo_note;
+
+/* trusted vmcoreinfo, e.g. a copy made in the crash memory */
+static unsigned char *vmcoreinfo_data_safecopy;
/*
* parsing the "crashkernel" commandline
@@ -324,8 +326,23 @@ static void update_vmcoreinfo_note(void)
final_note(buf);
}
+void crash_update_vmcoreinfo_safecopy(void *ptr)
+{
+ if (ptr)
+ memcpy(ptr, vmcoreinfo_data, vmcoreinfo_size);
+
+ vmcoreinfo_data_safecopy = ptr;
+}
+
void crash_save_vmcoreinfo(void)
{
+ if (!vmcoreinfo_note)
+ return;
+
+	/* Use the safe copy to generate the vmcoreinfo note if we have one */
+ if (vmcoreinfo_data_safecopy)
+ vmcoreinfo_data = vmcoreinfo_data_safecopy;
+
vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
update_vmcoreinfo_note();
}
@@ -340,7 +357,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
r = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
- r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
+ r = min(r, (size_t)VMCOREINFO_BYTES - vmcoreinfo_size);
memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
@@ -356,11 +373,26 @@ void __weak arch_crash_save_vmcoreinfo(void)
phys_addr_t __weak paddr_vmcoreinfo_note(void)
{
- return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
+ return __pa(vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
+ vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+ if (!vmcoreinfo_data) {
+ pr_warn("Memory allocation for vmcoreinfo_data failed\n");
+ return -ENOMEM;
+ }
+
+ vmcoreinfo_note = alloc_pages_exact(VMCOREINFO_NOTE_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vmcoreinfo_note) {
+ free_page((unsigned long)vmcoreinfo_data);
+ vmcoreinfo_data = NULL;
+ pr_warn("Memory allocation for vmcoreinfo_note failed\n");
+ return -ENOMEM;
+ }
+
VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
VMCOREINFO_PAGESIZE(PAGE_SIZE);
diff --git a/kernel/fork.c b/kernel/fork.c
index 0f69a3e5281e..17921b0390b4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -205,19 +205,17 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
void *stack;
int i;
- local_irq_disable();
for (i = 0; i < NR_CACHED_STACKS; i++) {
- struct vm_struct *s = this_cpu_read(cached_stacks[i]);
+ struct vm_struct *s;
+
+ s = this_cpu_xchg(cached_stacks[i], NULL);
if (!s)
continue;
- this_cpu_write(cached_stacks[i], NULL);
tsk->stack_vm_area = s;
- local_irq_enable();
return s->addr;
}
- local_irq_enable();
stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
VMALLOC_START, VMALLOC_END,
@@ -245,19 +243,15 @@ static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
if (task_stack_vm_area(tsk)) {
- unsigned long flags;
int i;
- local_irq_save(flags);
for (i = 0; i < NR_CACHED_STACKS; i++) {
- if (this_cpu_read(cached_stacks[i]))
+ if (this_cpu_cmpxchg(cached_stacks[i],
+ NULL, tsk->stack_vm_area) != NULL)
continue;
- this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
- local_irq_restore(flags);
return;
}
- local_irq_restore(flags);
vfree_atomic(tsk->stack);
return;
@@ -560,7 +554,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
set_task_stack_end_magic(tsk);
#ifdef CONFIG_CC_STACKPROTECTOR
- tsk->stack_canary = get_random_long();
+ tsk->stack_canary = get_random_canary();
#endif
/*
@@ -579,6 +573,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
kcov_task_init(tsk);
+#ifdef CONFIG_FAULT_INJECTION
+ tsk->fail_nth = 0;
+#endif
+
return tsk;
free_stack:
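
The fork.c hunks replace irq-disabled read/write pairs with single atomic per-CPU operations, making the cached-stack fast path safe against interrupts without masking them. A reduced sketch of the same lockless per-CPU cache idiom (hypothetical object type and slot count):

#define NR_SLOTS 2

static DEFINE_PER_CPU(void *, obj_cache[NR_SLOTS]);

static void *cache_get(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		/* atomically take the slot; no local_irq_disable() needed */
		void *p = this_cpu_xchg(obj_cache[i], NULL);

		if (p)
			return p;
	}
	return NULL;	/* caller falls back to a fresh allocation */
}

static bool cache_put(void *p)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		/* only install into a slot observed empty */
		if (this_cpu_cmpxchg(obj_cache[i], NULL, p) == NULL)
			return true;
	}
	return false;	/* cache full: caller frees */
}
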
diff --git a/kernel/futex.c b/kernel/futex.c
index c934689043b2..16dbe4c93895 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -212,7 +212,7 @@ struct futex_pi_state {
atomic_t refcount;
union futex_key key;
-};
+} __randomize_layout;
/**
* struct futex_q - The hashed futex queue entry, one per waiting task
@@ -246,7 +246,7 @@ struct futex_q {
struct rt_mutex_waiter *rt_waiter;
union futex_key *requeue_pi_key;
u32 bitset;
-};
+} __randomize_layout;
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me()*/
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5624b2dd6b58..1d1a5b945ab4 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1090,6 +1090,16 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
+ *
+ * Locking rules:
+ *
+ * desc->request_mutex Provides serialization against a concurrent free_irq()
+ * chip_bus_lock Provides serialization for slow bus operations
+ * desc->lock Provides serialization against hard interrupts
+ *
+ * chip_bus_lock and desc->lock are sufficient for all other management and
+ * interrupt related functions. desc->request_mutex solely serializes
+ * request/free_irq().
*/
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
@@ -1167,20 +1177,35 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
new->flags &= ~IRQF_ONESHOT;
+ /*
+ * Protects against a concurrent __free_irq() call which might wait
+ * for synchronize_irq() to complete without holding the optional
+ * chip bus lock and desc->lock.
+ */
mutex_lock(&desc->request_mutex);
+
+ /*
+ * Acquire bus lock as the irq_request_resources() callback below
+ * might rely on the serialization or the magic power management
+	 * functions which are abusing the irq_bus_lock() callback.
+ */
+ chip_bus_lock(desc);
+
+ /* First installed action requests resources. */
if (!desc->action) {
ret = irq_request_resources(desc);
if (ret) {
pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
new->name, irq, desc->irq_data.chip->name);
- goto out_mutex;
+ goto out_bus_unlock;
}
}
- chip_bus_lock(desc);
-
/*
* The following block of code has to be executed atomically
+ * protected against a concurrent interrupt and any of the other
+ * management calls which are not serialized via
+ * desc->request_mutex or the optional bus lock.
*/
raw_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
@@ -1286,10 +1311,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK);
- if (ret) {
- irq_release_resources(desc);
+ if (ret)
goto out_unlock;
- }
}
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
@@ -1385,12 +1408,10 @@ mismatch:
out_unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(desc);
-
if (!desc->action)
irq_release_resources(desc);
-
-out_mutex:
+out_bus_unlock:
+ chip_bus_sync_unlock(desc);
mutex_unlock(&desc->request_mutex);
out_thread:
@@ -1472,6 +1493,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
WARN(1, "Trying to free already-free IRQ %d\n", irq);
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
+ mutex_unlock(&desc->request_mutex);
return NULL;
}
@@ -1498,6 +1520,20 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
#endif
raw_spin_unlock_irqrestore(&desc->lock, flags);
+ /*
+ * Drop bus_lock here so the changes which were done in the chip
+ * callbacks above are synced out to the irq chips which hang
+ * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+ *
+ * Aside of that the bus_lock can also be taken from the threaded
+ * handler in irq_finalize_oneshot() which results in a deadlock
+ * because synchronize_irq() would wait forever for the thread to
+ * complete, which is blocked on the bus lock.
+ *
+ * The still held desc->request_mutex() protects against a
+ * concurrent request_irq() of this irq so the release of resources
+ * and timing data is properly serialized.
+ */
chip_bus_sync_unlock(desc);
unregister_handler_proc(irq, action);
@@ -1530,8 +1566,15 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
}
}
+ /* Last action releases resources */
if (!desc->action) {
+ /*
+		 * Reacquire bus lock as irq_release_resources() might
+ * require it to deallocate resources over the slow bus.
+ */
+ chip_bus_lock(desc);
irq_release_resources(desc);
+ chip_bus_sync_unlock(desc);
irq_remove_timings(desc);
}
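
Summarizing the lock ordering the comments above establish (a sketch of the control flow, not compilable driver code):

/*
 *	request_irq() / free_irq()
 *	  mutex_lock(&desc->request_mutex);	// request vs. free
 *	    chip_bus_lock(desc);		// slow-bus (I2C/SPI) ops
 *	      raw_spin_lock_irqsave(&desc->lock, flags);  // vs. hard irqs
 *	      ...
 *	      raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	    chip_bus_sync_unlock(desc);		// sync changes out to the chip
 *	    synchronize_irq();			// only with the bus lock dropped
 *	  mutex_unlock(&desc->request_mutex);
 *
 * Dropping the bus lock before synchronize_irq() avoids deadlocking
 * against a threaded handler that takes it in irq_finalize_oneshot().
 */
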
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index 3a47fa998fe0..ea34ed8bb952 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -11,6 +11,10 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>
+#include <linux/capability.h>
+#include <linux/list.h>
+#include <linux/eventpoll.h>
+#include <linux/file.h>
#include <asm/unistd.h>
@@ -94,6 +98,56 @@ static int kcmp_lock(struct mutex *m1, struct mutex *m2)
return err;
}
+#ifdef CONFIG_EPOLL
+static int kcmp_epoll_target(struct task_struct *task1,
+ struct task_struct *task2,
+ unsigned long idx1,
+ struct kcmp_epoll_slot __user *uslot)
+{
+ struct file *filp, *filp_epoll, *filp_tgt;
+ struct kcmp_epoll_slot slot;
+ struct files_struct *files;
+
+ if (copy_from_user(&slot, uslot, sizeof(slot)))
+ return -EFAULT;
+
+ filp = get_file_raw_ptr(task1, idx1);
+ if (!filp)
+ return -EBADF;
+
+ files = get_files_struct(task2);
+ if (!files)
+ return -EBADF;
+
+ spin_lock(&files->file_lock);
+ filp_epoll = fcheck_files(files, slot.efd);
+ if (filp_epoll)
+ get_file(filp_epoll);
+ else
+ filp_tgt = ERR_PTR(-EBADF);
+ spin_unlock(&files->file_lock);
+ put_files_struct(files);
+
+ if (filp_epoll) {
+ filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
+ fput(filp_epoll);
+	}
+
+ if (IS_ERR(filp_tgt))
+ return PTR_ERR(filp_tgt);
+
+ return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
+}
+#else
+static int kcmp_epoll_target(struct task_struct *task1,
+ struct task_struct *task2,
+ unsigned long idx1,
+ struct kcmp_epoll_slot __user *uslot)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
unsigned long, idx1, unsigned long, idx2)
{
@@ -165,6 +219,9 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
ret = -EOPNOTSUPP;
#endif
break;
+ case KCMP_EPOLL_TFD:
+ ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
+ break;
default:
ret = -EINVAL;
break;
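
From userspace, the new mode compares an arbitrary fd in one task against a target file registered in another task's epoll instance. A minimal sketch assuming a kernel with this patch applied; glibc has no kcmp() wrapper, so the raw syscall is used, and comparing a pipe fd against its own epoll registration should report equality (0):

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

int main(void)
{
	int efd = epoll_create1(0);
	int fds[2];
	struct epoll_event ev = { .events = EPOLLIN };
	struct kcmp_epoll_slot slot = { .toff = 0 };

	if (efd < 0 || pipe(fds) < 0)
		return 1;
	if (epoll_ctl(efd, EPOLL_CTL_ADD, fds[0], &ev) < 0)
		return 1;

	slot.efd = efd;
	slot.tfd = fds[0];
	/* 0 means both fds resolve to the same struct file */
	printf("kcmp -> %ld\n",
	       syscall(SYS_kcmp, getpid(), getpid(), KCMP_EPOLL_TFD,
		       fds[0], (unsigned long)&slot));
	return 0;
}
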
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 980936a90ee6..e62ec4dc6620 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -144,6 +144,14 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
if (ret)
goto out;
+ /*
+	 * Some architectures (like S390) may touch the crash memory before
+	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
+ */
+ ret = kimage_crash_copy_vmcoreinfo(image);
+ if (ret)
+ goto out;
+
for (i = 0; i < nr_segments; i++) {
ret = kimage_load_segment(image, &image->segment[i]);
if (ret)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 154ffb489b93..1ae7c41c33c1 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -482,6 +482,40 @@ struct page *kimage_alloc_control_pages(struct kimage *image,
return pages;
}
+int kimage_crash_copy_vmcoreinfo(struct kimage *image)
+{
+ struct page *vmcoreinfo_page;
+ void *safecopy;
+
+ if (image->type != KEXEC_TYPE_CRASH)
+ return 0;
+
+ /*
+ * For kdump, allocate one vmcoreinfo safe copy from the
+	 * crash memory. As arch_kexec_protect_crashkres() runs after
+	 * the kexec syscall, the copy is naturally protected from
+	 * write (even read) access under the kernel direct mapping.
+	 * On the other hand, we still need to write to it when a
+	 * crash happens to generate the vmcoreinfo note, hence we
+	 * rely on vmap for this purpose.
+ */
+ vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
+ if (!vmcoreinfo_page) {
+ pr_warn("Could not allocate vmcoreinfo buffer\n");
+ return -ENOMEM;
+ }
+ safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
+ if (!safecopy) {
+ pr_warn("Could not vmap vmcoreinfo buffer\n");
+ return -ENOMEM;
+ }
+
+ image->vmcoreinfo_data_copy = safecopy;
+ crash_update_vmcoreinfo_safecopy(safecopy);
+
+ return 0;
+}
+
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
if (*image->entry != 0)
@@ -569,6 +603,11 @@ void kimage_free(struct kimage *image)
if (!image)
return;
+ if (image->vmcoreinfo_data_copy) {
+ crash_update_vmcoreinfo_safecopy(NULL);
+ vunmap(image->vmcoreinfo_data_copy);
+ }
+
kimage_free_extra_pages(image);
for_each_kimage_entry(image, ptr, entry) {
if (entry & IND_INDIRECTION) {
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 766e7e4d3ad9..9f48f4412297 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -26,13 +26,6 @@
#include <linux/vmalloc.h>
#include "kexec_internal.h"
-/*
- * Declare these symbols weak so that if architecture provides a purgatory,
- * these will be overridden.
- */
-char __weak kexec_purgatory[0];
-size_t __weak kexec_purgatory_size = 0;
-
static int kexec_calculate_store_digests(struct kimage *image);
/* Architectures can provide this probe function */
@@ -298,6 +291,14 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
if (ret)
goto out;
+ /*
+	 * Some architectures (like S390) may touch the crash memory before
+	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
+ */
+ ret = kimage_crash_copy_vmcoreinfo(image);
+ if (ret)
+ goto out;
+
ret = kexec_calculate_store_digests(image);
if (ret)
goto out;
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 799a8a452187..50dfcb039a41 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -17,6 +17,8 @@ extern struct mutex kexec_mutex;
#ifdef CONFIG_KEXEC_FILE
#include <linux/purgatory.h>
void kimage_file_post_load_cleanup(struct kimage *image);
+extern char kexec_purgatory[];
+extern size_t kexec_purgatory_size;
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
#endif /* CONFIG_KEXEC_FILE */
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ff68198fe83b..6d016c5d97c8 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -68,6 +68,7 @@ static DECLARE_RWSEM(umhelper_sem);
*/
#define MAX_KMOD_CONCURRENT 50
static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
+static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
/*
modprobe_path is set via /proc/sys.
@@ -140,7 +141,6 @@ int __request_module(bool wait, const char *fmt, ...)
va_list args;
char module_name[MODULE_NAME_LEN];
int ret;
- static int kmod_loop_msg;
/*
* We don't allow synchronous module loading from async. Module
@@ -164,14 +164,11 @@ int __request_module(bool wait, const char *fmt, ...)
return ret;
if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
- /* We may be blaming an innocent here, but unlikely */
- if (kmod_loop_msg < 5) {
- printk(KERN_ERR
- "request_module: runaway loop modprobe %s\n",
- module_name);
- kmod_loop_msg++;
- }
- return -ENOMEM;
+ pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
+ atomic_read(&kmod_concurrent_max),
+ MAX_KMOD_CONCURRENT, module_name);
+ wait_event_interruptible(kmod_wq,
+ atomic_dec_if_positive(&kmod_concurrent_max) >= 0);
}
trace_module_request(module_name, wait, _RET_IP_);
@@ -179,6 +176,7 @@ int __request_module(bool wait, const char *fmt, ...)
ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
atomic_inc(&kmod_concurrent_max);
+ wake_up(&kmod_wq);
return ret;
}
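
The kmod change turns what used to be a hard -ENOMEM failure into throttling: callers sleep on a wait queue until another modprobe finishes and releases a slot. A reduced sketch of the idiom with hypothetical names; note that, as in the patch, an interrupted sleep simply falls through without a slot:

#define MAX_SLOTS 50

static atomic_t slots = ATOMIC_INIT(MAX_SLOTS);
static DECLARE_WAIT_QUEUE_HEAD(slot_wq);

static int do_throttled_work(void)
{
	if (atomic_dec_if_positive(&slots) < 0) {
		/* all slots busy: sleep until a release wakes us */
		wait_event_interruptible(slot_wq,
				atomic_dec_if_positive(&slots) >= 0);
	}

	/* ... the actual work runs here ... */

	atomic_inc(&slots);	/* release the slot ... */
	wake_up(&slot_wq);	/* ... and kick one waiter */
	return 0;
}
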
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index df1a9aa602a0..46ba853656f6 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -134,7 +134,7 @@ static ssize_t vmcoreinfo_show(struct kobject *kobj,
{
phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
return sprintf(buf, "%pa %x\n", &vmcore_base,
- (unsigned int)sizeof(vmcoreinfo_note));
+ (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 076a2e31951c..29a397067ffa 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -610,6 +610,11 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_cpu->sg_policy = sg_policy;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ }
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
policy_is_shared(policy) ?
sugov_update_shared :
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4dfba1a76cc3..6648fbbb8157 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -174,11 +174,32 @@ extern int no_unaligned_warning;
#ifdef CONFIG_PROC_SYSCTL
-#define SYSCTL_WRITES_LEGACY -1
-#define SYSCTL_WRITES_WARN 0
-#define SYSCTL_WRITES_STRICT 1
+/**
+ * enum sysctl_writes_mode - supported sysctl write modes
+ *
+ * @SYSCTL_WRITES_LEGACY: each write syscall must fully contain the sysctl value
+ * to be written, and multiple writes on the same sysctl file descriptor
+ * will rewrite the sysctl value, regardless of file position. No warning
+ * is issued when the initial position is not 0.
+ * @SYSCTL_WRITES_WARN: same as above but warn when the initial file position is
+ * not 0.
+ * @SYSCTL_WRITES_STRICT: writes to numeric sysctl entries must always be at
+ * file position 0 and the value must be fully contained in the buffer
+ * sent to the write syscall. If dealing with strings respect the file
+ * position, but restrict this to the max length of the buffer, anything
+ *	past the max length will be ignored. Multiple writes will append
+ * to the buffer.
+ *
+ * These write modes control how current file position affects the behavior of
+ * updating sysctl values through the proc interface on each write.
+ */
+enum sysctl_writes_mode {
+ SYSCTL_WRITES_LEGACY = -1,
+ SYSCTL_WRITES_WARN = 0,
+ SYSCTL_WRITES_STRICT = 1,
+};
-static int sysctl_writes_strict = SYSCTL_WRITES_STRICT;
+static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
static int proc_do_cad_pid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
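
For a concrete feel of the modes documented above: with the numeric handlers below, SYSCTL_WRITES_STRICT consumes nothing when the file position is non-zero (the write returns 0 bytes), while SYSCTL_WRITES_WARN logs once and processes the write anyway. A small userspace sketch; /proc/sys/kernel/panic is just a convenient numeric sysctl, and the return value is inferred from the handler logic below:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/panic", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	/* non-zero offset: strict mode refuses to consume the data */
	n = pwrite(fd, "0", 1, 1);
	printf("pwrite at offset 1 -> %zd\n", n);
	close(fd);
	return 0;
}
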
@@ -880,6 +901,14 @@ static struct ctl_table kern_table[] = {
#endif
},
{
+ .procname = "watchdog_cpumask",
+ .data = &watchdog_cpumask_bits,
+ .maxlen = NR_CPUS,
+ .mode = 0644,
+ .proc_handler = proc_watchdog_cpumask,
+ },
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
+ {
.procname = "soft_watchdog",
.data = &soft_watchdog_enabled,
.maxlen = sizeof (int),
@@ -889,13 +918,6 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
{
- .procname = "watchdog_cpumask",
- .data = &watchdog_cpumask_bits,
- .maxlen = NR_CPUS,
- .mode = 0644,
- .proc_handler = proc_watchdog_cpumask,
- },
- {
.procname = "softlockup_panic",
.data = &softlockup_panic,
.maxlen = sizeof(int),
@@ -904,27 +926,29 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_SMP
{
- .procname = "hardlockup_panic",
- .data = &hardlockup_panic,
+ .procname = "softlockup_all_cpu_backtrace",
+ .data = &sysctl_softlockup_all_cpu_backtrace,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
+#endif /* CONFIG_SMP */
#endif
-#ifdef CONFIG_SMP
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
{
- .procname = "softlockup_all_cpu_backtrace",
- .data = &sysctl_softlockup_all_cpu_backtrace,
+ .procname = "hardlockup_panic",
+ .data = &hardlockup_panic,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
+#ifdef CONFIG_SMP
{
.procname = "hardlockup_all_cpu_backtrace",
.data = &sysctl_hardlockup_all_cpu_backtrace,
@@ -936,6 +960,8 @@ static struct ctl_table kern_table[] = {
},
#endif /* CONFIG_SMP */
#endif
+#endif
+
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
{
.procname = "unknown_nmi_panic",
@@ -1950,6 +1976,32 @@ static void warn_sysctl_write(struct ctl_table *table)
}
/**
+ * proc_first_pos_non_zero_ignore - check if first position is allowed
+ * @ppos: file position
+ * @table: the sysctl table
+ *
+ * Returns true if the first position is non-zero and the sysctl_writes_strict
+ * mode indicates this is not allowed for numeric input types. String proc
+ * handlers can ignore the return value.
+ */
+static bool proc_first_pos_non_zero_ignore(loff_t *ppos,
+ struct ctl_table *table)
+{
+ if (!*ppos)
+ return false;
+
+ switch (sysctl_writes_strict) {
+ case SYSCTL_WRITES_STRICT:
+ return true;
+ case SYSCTL_WRITES_WARN:
+ warn_sysctl_write(table);
+ return false;
+ default:
+ return false;
+ }
+}
+
+/**
* proc_dostring - read a string sysctl
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
@@ -1969,8 +2021,8 @@ static void warn_sysctl_write(struct ctl_table *table)
int proc_dostring(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- if (write && *ppos && sysctl_writes_strict == SYSCTL_WRITES_WARN)
- warn_sysctl_write(table);
+ if (write)
+ proc_first_pos_non_zero_ignore(ppos, table);
return _proc_do_string((char *)(table->data), table->maxlen, write,
(char __user *)buffer, lenp, ppos);
@@ -2128,19 +2180,18 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
return 0;
}
-static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
- int *valp,
- int write, void *data)
+static int do_proc_douintvec_conv(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data)
{
if (write) {
-		if (*negp)
-			return -EINVAL;
if (*lvalp > UINT_MAX)
return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
- *negp = false;
*lvalp = (unsigned long)val;
}
return 0;
@@ -2172,17 +2223,8 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
conv = do_proc_dointvec_conv;
if (write) {
- if (*ppos) {
- switch (sysctl_writes_strict) {
- case SYSCTL_WRITES_STRICT:
- goto out;
- case SYSCTL_WRITES_WARN:
- warn_sysctl_write(table);
- break;
- default:
- break;
- }
- }
+ if (proc_first_pos_non_zero_ignore(ppos, table))
+ goto out;
if (left > PAGE_SIZE - 1)
left = PAGE_SIZE - 1;
@@ -2249,6 +2291,146 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
buffer, lenp, ppos, conv, data);
}
+static int do_proc_douintvec_w(unsigned int *tbl_data,
+ struct ctl_table *table,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data)
+{
+ unsigned long lval;
+ int err = 0;
+ size_t left;
+ bool neg;
+ char *kbuf = NULL, *p;
+
+ left = *lenp;
+
+ if (proc_first_pos_non_zero_ignore(ppos, table))
+ goto bail_early;
+
+ if (left > PAGE_SIZE - 1)
+ left = PAGE_SIZE - 1;
+
+ p = kbuf = memdup_user_nul(buffer, left);
+ if (IS_ERR(kbuf))
+ return -EINVAL;
+
+ left -= proc_skip_spaces(&p);
+ if (!left) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ err = proc_get_long(&p, &left, &lval, &neg,
+ proc_wspace_sep,
+ sizeof(proc_wspace_sep), NULL);
+ if (err || neg) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ if (conv(&lval, tbl_data, 1, data)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ if (!err && left)
+ left -= proc_skip_spaces(&p);
+
+out_free:
+ kfree(kbuf);
+ if (err)
+ return -EINVAL;
+
+ return 0;
+
+ /* This is in keeping with old __do_proc_dointvec() */
+bail_early:
+ *ppos += *lenp;
+ return err;
+}
+
+static int do_proc_douintvec_r(unsigned int *tbl_data, void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data)
+{
+ unsigned long lval;
+ int err = 0;
+ size_t left;
+
+ left = *lenp;
+
+ if (conv(&lval, tbl_data, 0, data)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = proc_put_long(&buffer, &left, lval, false);
+ if (err || !left)
+ goto out;
+
+ err = proc_put_char(&buffer, &left, '\n');
+
+out:
+ *lenp -= left;
+ *ppos += *lenp;
+
+ return err;
+}
+
+static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data)
+{
+ unsigned int *i, vleft;
+
+ if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+
+ i = (unsigned int *) tbl_data;
+ vleft = table->maxlen / sizeof(*i);
+
+ /*
+ * Arrays are not supported, keep this simple. *Do not* add
+ * support for them.
+ */
+ if (vleft != 1) {
+ *lenp = 0;
+ return -EINVAL;
+ }
+
+ if (!conv)
+ conv = do_proc_douintvec_conv;
+
+ if (write)
+ return do_proc_douintvec_w(i, table, buffer, lenp, ppos,
+ conv, data);
+ return do_proc_douintvec_r(i, buffer, lenp, ppos, conv, data);
+}
+
+static int do_proc_douintvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data)
+{
+ return __do_proc_douintvec(table->data, table, write,
+ buffer, lenp, ppos, conv, data);
+}
+
/**
* proc_dointvec - read a vector of integers
* @table: the sysctl table
@@ -2284,8 +2466,8 @@ int proc_dointvec(struct ctl_table *table, int write,
int proc_douintvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table, write, buffer, lenp, ppos,
- do_proc_douintvec_conv, NULL);
+ return do_proc_douintvec(table, write, buffer, lenp, ppos,
+ do_proc_douintvec_conv, NULL);
}
/*
@@ -2390,6 +2572,65 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
do_proc_dointvec_minmax_conv, &param);
}
+struct do_proc_douintvec_minmax_conv_param {
+ unsigned int *min;
+ unsigned int *max;
+};
+
+static int do_proc_douintvec_minmax_conv(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data)
+{
+ struct do_proc_douintvec_minmax_conv_param *param = data;
+
+ if (write) {
+ unsigned int val = *lvalp;
+
+ if ((param->min && *param->min > val) ||
+ (param->max && *param->max < val))
+ return -ERANGE;
+
+ if (*lvalp > UINT_MAX)
+ return -EINVAL;
+ *valp = val;
+ } else {
+ unsigned int val = *valp;
+ *lvalp = (unsigned long) val;
+ }
+
+ return 0;
+}
+
+/**
+ * proc_douintvec_minmax - read a vector of unsigned ints with min/max values
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
+ * values from/to the user buffer, treated as an ASCII string. Negative
+ * strings are not allowed.
+ *
+ * This routine will ensure the values are within the range specified by
+ * table->extra1 (min) and table->extra2 (max). There is a final sanity
+ * check for UINT_MAX to avoid having to support wrap around uses from
+ * userspace.
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec_minmax(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct do_proc_douintvec_minmax_conv_param param = {
+ .min = (unsigned int *) table->extra1,
+ .max = (unsigned int *) table->extra2,
+ };
+ return do_proc_douintvec(table, write, buffer, lenp, ppos,
+ do_proc_douintvec_minmax_conv, &param);
+}
+
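As a usage sketch (names and bounds are illustrative, not from this patch), a driver exposes a bounded unsigned int through the new handler like so:

static unsigned int example_knob = 10;
static unsigned int example_min = 1;
static unsigned int example_max = 100;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(unsigned int),	/* arrays are rejected */
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ }
};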
static void validate_coredump_safety(void)
{
#ifdef CONFIG_COREDUMP
@@ -2447,17 +2688,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
left = *lenp;
if (write) {
- if (*ppos) {
- switch (sysctl_writes_strict) {
- case SYSCTL_WRITES_STRICT:
- goto out;
- case SYSCTL_WRITES_WARN:
- warn_sysctl_write(table);
- break;
- default:
- break;
- }
- }
+ if (proc_first_pos_non_zero_ignore(ppos, table))
+ goto out;
if (left > PAGE_SIZE - 1)
left = PAGE_SIZE - 1;
@@ -2898,6 +3130,12 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
return -ENOSYS;
}
+int proc_douintvec_minmax(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return -ENOSYS;
+}
+
int proc_dointvec_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -2940,6 +3178,7 @@ EXPORT_SYMBOL(proc_dointvec);
EXPORT_SYMBOL(proc_douintvec);
EXPORT_SYMBOL(proc_dointvec_jiffies);
EXPORT_SYMBOL(proc_dointvec_minmax);
+EXPORT_SYMBOL_GPL(proc_douintvec_minmax);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 939a158eab11..02e1859f2ca8 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1346,7 +1346,7 @@ static void deprecated_sysctl_warning(const int *name, int nlen)
* CTL_KERN/KERN_VERSION is used by older glibc and cannot
* ever go away.
*/
- if (name[0] == CTL_KERN && name[1] == KERN_VERSION)
+ if (nlen >= 2 && name[0] == CTL_KERN && name[1] == KERN_VERSION)
return;
if (printk_ratelimit()) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2953d558bbee..53f6b6401cf0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3816,7 +3816,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
int exclude_mod = 0;
int found = 0;
int ret;
- int clear_filter;
+ int clear_filter = 0;
if (func) {
func_g.type = filter_parse_regex(func, len, &func_g.search,
@@ -3950,7 +3950,7 @@ static int cache_mod(struct trace_array *tr,
continue;
/* no func matches all */
- if (!func || strcmp(func, "*") == 0 ||
+ if (strcmp(func, "*") == 0 ||
(ftrace_mod->func &&
strcmp(ftrace_mod->func, func) == 0)) {
ret = 0;
@@ -3978,6 +3978,7 @@ static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
int reset, int enable);
+#ifdef CONFIG_MODULES
static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
char *mod, bool enable)
{
@@ -4068,6 +4069,7 @@ static void process_cached_mods(const char *mod_name)
kfree(mod);
}
+#endif
/*
* We register the module command as a template to show others how
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 948ec32e0c27..2d0ffcc49dba 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1916,7 +1916,11 @@ static int trace_save_cmdline(struct task_struct *tsk)
{
unsigned pid, idx;
- if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
+ /* treat recording of idle task as a success */
+ if (!tsk->pid)
+ return 1;
+
+ if (unlikely(tsk->pid > PID_MAX_DEFAULT))
return 0;
/*
@@ -2002,7 +2006,11 @@ int trace_find_tgid(int pid)
static int trace_save_tgid(struct task_struct *tsk)
{
- if (unlikely(!tgid_map || !tsk->pid || tsk->pid > PID_MAX_DEFAULT))
+ /* treat recording of idle task as a success */
+ if (!tsk->pid)
+ return 1;
+
+ if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
return 0;
tgid_map[tsk->pid] = tsk->tgid;
@@ -2029,11 +2037,20 @@ static bool tracing_record_taskinfo_skip(int flags)
*/
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
+ bool done;
+
if (tracing_record_taskinfo_skip(flags))
return;
- if ((flags & TRACE_RECORD_CMDLINE) && !trace_save_cmdline(task))
- return;
- if ((flags & TRACE_RECORD_TGID) && !trace_save_tgid(task))
+
+ /*
+ * Record as much task information as possible. If some fail, continue
+ * to try to record the others.
+ */
+ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
+ done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
+
+ /* If recording any information failed, retry again soon. */
+ if (!done)
return;
__this_cpu_write(trace_taskinfo_save, false);
@@ -2050,15 +2067,22 @@ void tracing_record_taskinfo(struct task_struct *task, int flags)
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
struct task_struct *next, int flags)
{
+ bool done;
+
if (tracing_record_taskinfo_skip(flags))
return;
- if ((flags & TRACE_RECORD_CMDLINE) &&
- (!trace_save_cmdline(prev) || !trace_save_cmdline(next)))
- return;
+ /*
+ * Record as much task information as possible. If some fail, continue
+ * to try to record the others.
+ */
+ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
+ done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
+ done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
+ done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
- if ((flags & TRACE_RECORD_TGID) &&
- (!trace_save_tgid(prev) || !trace_save_tgid(next)))
+ /* If recording any information failed, retry again soon. */
+ if (!done)
return;
__this_cpu_write(trace_taskinfo_save, false);
@@ -3334,14 +3358,23 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
-
- seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? " " : "");
- seq_printf(m, "# %s / _----=> need-resched\n", tgid ? " " : "");
- seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? " " : "");
- seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? " " : "");
- seq_printf(m, "# %s||| / delay\n", tgid ? " " : "");
- seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
- seq_printf(m, "# | | | %s|||| | |\n", tgid ? " | " : "");
+ const char tgid_space[] = " ";
+ const char space[] = " ";
+
+ seq_printf(m, "# %s _-----=> irqs-off\n",
+ tgid ? tgid_space : space);
+ seq_printf(m, "# %s / _----=> need-resched\n",
+ tgid ? tgid_space : space);
+ seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
+ tgid ? tgid_space : space);
+ seq_printf(m, "# %s|| / _--=> preempt-depth\n",
+ tgid ? tgid_space : space);
+ seq_printf(m, "# %s||| / delay\n",
+ tgid ? tgid_space : space);
+ seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
+ tgid ? " TGID " : space);
+ seq_printf(m, "# | | | %s|||| | |\n",
+ tgid ? " | " : space);
}
void
@@ -4689,6 +4722,76 @@ static const struct file_operations tracing_readme_fops = {
.llseek = generic_file_llseek,
};
+static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ int *ptr = v;
+
+ if (*pos || m->count)
+ ptr++;
+
+ (*pos)++;
+
+ for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
+ if (trace_find_tgid(*ptr))
+ return ptr;
+ }
+
+ return NULL;
+}
+
+static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
+{
+ void *v;
+ loff_t l = 0;
+
+ if (!tgid_map)
+ return NULL;
+
+ v = &tgid_map[0];
+ while (l <= *pos) {
+ v = saved_tgids_next(m, v, &l);
+ if (!v)
+ return NULL;
+ }
+
+ return v;
+}
+
+static void saved_tgids_stop(struct seq_file *m, void *v)
+{
+}
+
+static int saved_tgids_show(struct seq_file *m, void *v)
+{
+ int pid = (int *)v - tgid_map;
+
+ seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
+ return 0;
+}
+
+static const struct seq_operations tracing_saved_tgids_seq_ops = {
+ .start = saved_tgids_start,
+ .stop = saved_tgids_stop,
+ .next = saved_tgids_next,
+ .show = saved_tgids_show,
+};
+
+static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
+{
+ if (tracing_disabled)
+ return -ENODEV;
+
+ return seq_open(filp, &tracing_saved_tgids_seq_ops);
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+ .open = tracing_saved_tgids_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
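Reading the new file yields one "PID TGID" pair per recorded entry, as emitted by saved_tgids_show(). An illustrative userspace reader, assuming the usual default tracefs mount point:

#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/debug/tracing/saved_tgids", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "1234 1230" */
	fclose(f);
	return 0;
}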
+
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
unsigned int *ptr = v;
@@ -7921,6 +8024,9 @@ static __init int tracer_init_tracefs(void)
trace_create_file("saved_cmdlines_size", 0644, d_tracer,
NULL, &tracing_saved_cmdlines_size_fops);
+ trace_create_file("saved_tgids", 0444, d_tracer,
+ NULL, &tracing_saved_tgids_fops);
+
trace_eval_init();
trace_create_eval_file(d_tracer);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 2c5221819be5..c9b5aa10fbf9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -598,6 +598,14 @@ static struct notifier_block trace_kprobe_module_nb = {
.priority = 1 /* Invoked after kprobe module callback */
};
+/* Convert certain expected symbols into '_' when generating event names */
+static inline void sanitize_event_name(char *name)
+{
+ while (*name++ != '\0')
+ if (*name == ':' || *name == '.')
+ *name = '_';
+}
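The effect, sketched with an illustrative symbol name: an auto-generated event name derived from an optimized symbol such as "ext4_iget.part.0" would contain '.', which the event core rejects; after sanitization it becomes a legal name. Note the loop deliberately starts at the second character, which is safe because generated names always begin with 'p' or 'r'.

	char buf[32] = "p_ext4_iget.part.0_0";	/* illustrative generated name */

	sanitize_event_name(buf);		/* -> "p_ext4_iget_part_0_0" */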
+
static int create_trace_kprobe(int argc, char **argv)
{
/*
@@ -736,6 +744,7 @@ static int create_trace_kprobe(int argc, char **argv)
else
snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
is_return ? 'r' : 'p', addr);
+ sanitize_event_name(buf);
event = buf;
}
tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index b4a751e8f9d6..a4df67cbc711 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -406,6 +406,8 @@ static const struct file_operations stack_trace_fops = {
.release = seq_release,
};
+#ifdef CONFIG_DYNAMIC_FTRACE
+
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
@@ -423,6 +425,8 @@ static const struct file_operations stack_trace_filter_fops = {
.release = ftrace_regex_release,
};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -477,8 +481,10 @@ static __init int stack_trace_init(void)
trace_create_file("stack_trace", 0444, d_tracer,
NULL, &stack_trace_fops);
+#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("stack_trace_filter", 0444, d_tracer,
&trace_ops, &stack_trace_filter_fops);
+#endif
if (stack_trace_filter_buf[0])
ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 03e0b69bb5bf..06d3389bca0d 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -9,7 +9,7 @@
* to those contributors as well.
*/
-#define pr_fmt(fmt) "NMI watchdog: " fmt
+#define pr_fmt(fmt) "watchdog: " fmt
#include <linux/mm.h>
#include <linux/cpu.h>
@@ -29,15 +29,58 @@
#include <linux/kvm_para.h>
#include <linux/kthread.h>
+/* Watchdog configuration */
static DEFINE_MUTEX(watchdog_proc_mutex);
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+int __read_mostly nmi_watchdog_enabled;
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
+ NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
-int __read_mostly nmi_watchdog_enabled;
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+ CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+ if (!strncmp(str, "panic", 5))
+ hardlockup_panic = 1;
+ else if (!strncmp(str, "nopanic", 7))
+ hardlockup_panic = 0;
+ else if (!strncmp(str, "0", 1))
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+ else if (!strncmp(str, "1", 1))
+ watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+ return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+#endif
+
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
+#endif
+
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;
@@ -45,15 +88,9 @@ int __read_mostly watchdog_thresh = 10;
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
-static struct cpumask watchdog_cpumask __read_mostly;
+struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
-/* Helper for online, unparked cpus. */
-#define for_each_watchdog_cpu(cpu) \
- for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
-
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
-
/*
* The 'watchdog_running' variable is set to 1 when the watchdog threads
* are registered/started and is set to 0 when the watchdog threads are
@@ -72,7 +109,47 @@ static int __read_mostly watchdog_running;
* of 'watchdog_running' cannot change while the watchdog is deactivated
* temporarily (see related code in 'proc' handlers).
*/
-static int __read_mostly watchdog_suspended;
+int __read_mostly watchdog_suspended;
+
+/*
+ * These functions can be overridden if an architecture implements its
+ * own hardlockup detector.
+ *
+ * watchdog_nmi_enable/disable can be implemented to start and stop the
+ * hard lockup detector when the softlockup watchdog threads start and
+ * stop. The arch must select the SOFTLOCKUP_DETECTOR Kconfig.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+ return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
+
+/*
+ * watchdog_nmi_reconfigure can be implemented to be notified after any
+ * watchdog configuration change. The arch hardlockup watchdog should
+ * respond to the following variables:
+ * - nmi_watchdog_enabled
+ * - watchdog_thresh
+ * - watchdog_cpumask
+ * - sysctl_hardlockup_all_cpu_backtrace
+ * - hardlockup_panic
+ * - watchdog_suspended
+ */
+void __weak watchdog_nmi_reconfigure(void)
+{
+}
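A hypothetical arch-side sketch (function bodies are illustrative only) of overriding these __weak stubs:

int watchdog_nmi_enable(unsigned int cpu)
{
	/* Arm this CPU's hard lockup NMI source. */
	return 0;
}

void watchdog_nmi_disable(unsigned int cpu)
{
	/* Quiesce this CPU's hard lockup NMI source. */
}

void watchdog_nmi_reconfigure(void)
{
	/* Re-read nmi_watchdog_enabled, watchdog_thresh,
	 * watchdog_cpumask, etc., and reprogram the hardware.
	 */
}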
+
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
+
+/* Helper for online, unparked cpus. */
+#define for_each_watchdog_cpu(cpu) \
+ for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
static u64 __read_mostly sample_period;
@@ -120,6 +197,7 @@ static int __init softlockup_all_cpu_backtrace_setup(char *str)
return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
sysctl_hardlockup_all_cpu_backtrace =
@@ -128,6 +206,7 @@ static int __init hardlockup_all_cpu_backtrace_setup(char *str)
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
+#endif
/*
* Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -213,18 +292,6 @@ void touch_softlockup_watchdog_sync(void)
__this_cpu_write(watchdog_touch_ts, 0);
}
-/* watchdog detector functions */
-bool is_hardlockup(void)
-{
- unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
-
- if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
- return true;
-
- __this_cpu_write(hrtimer_interrupts_saved, hrint);
- return false;
-}
-
static int is_softlockup(unsigned long touch_ts)
{
unsigned long now = get_timestamp();
@@ -237,21 +304,21 @@ static int is_softlockup(unsigned long touch_ts)
return 0;
}
-static void watchdog_interrupt_count(void)
+/* watchdog detector functions */
+bool is_hardlockup(void)
{
- __this_cpu_inc(hrtimer_interrupts);
-}
+ unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
-/*
- * These two functions are mostly architecture specific
- * defining them as weak here.
- */
-int __weak watchdog_nmi_enable(unsigned int cpu)
-{
- return 0;
+ if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
+ return true;
+
+ __this_cpu_write(hrtimer_interrupts_saved, hrint);
+ return false;
}
-void __weak watchdog_nmi_disable(unsigned int cpu)
+
+static void watchdog_interrupt_count(void)
{
+ __this_cpu_inc(hrtimer_interrupts);
}
static int watchdog_enable_all_cpus(void);
@@ -502,57 +569,6 @@ static void watchdog_unpark_threads(void)
kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}
-/*
- * Suspend the hard and soft lockup detector by parking the watchdog threads.
- */
-int lockup_detector_suspend(void)
-{
- int ret = 0;
-
- get_online_cpus();
- mutex_lock(&watchdog_proc_mutex);
- /*
- * Multiple suspend requests can be active in parallel (counted by
- * the 'watchdog_suspended' variable). If the watchdog threads are
- * running, the first caller takes care that they will be parked.
- * The state of 'watchdog_running' cannot change while a suspend
- * request is active (see related code in 'proc' handlers).
- */
- if (watchdog_running && !watchdog_suspended)
- ret = watchdog_park_threads();
-
- if (ret == 0)
- watchdog_suspended++;
- else {
- watchdog_disable_all_cpus();
- pr_err("Failed to suspend lockup detectors, disabled\n");
- watchdog_enabled = 0;
- }
-
- mutex_unlock(&watchdog_proc_mutex);
-
- return ret;
-}
-
-/*
- * Resume the hard and soft lockup detector by unparking the watchdog threads.
- */
-void lockup_detector_resume(void)
-{
- mutex_lock(&watchdog_proc_mutex);
-
- watchdog_suspended--;
- /*
- * The watchdog threads are unparked if they were previously running
- * and if there is no more active suspend request.
- */
- if (watchdog_running && !watchdog_suspended)
- watchdog_unpark_threads();
-
- mutex_unlock(&watchdog_proc_mutex);
- put_online_cpus();
-}
-
static int update_watchdog_all_cpus(void)
{
int ret;
@@ -605,6 +621,100 @@ static void watchdog_disable_all_cpus(void)
}
#ifdef CONFIG_SYSCTL
+static int watchdog_update_cpus(void)
+{
+ return smpboot_update_cpumask_percpu_thread(
+ &watchdog_threads, &watchdog_cpumask);
+}
+#endif
+
+#else /* SOFTLOCKUP */
+static int watchdog_park_threads(void)
+{
+ return 0;
+}
+
+static void watchdog_unpark_threads(void)
+{
+}
+
+static int watchdog_enable_all_cpus(void)
+{
+ return 0;
+}
+
+static void watchdog_disable_all_cpus(void)
+{
+}
+
+#ifdef CONFIG_SYSCTL
+static int watchdog_update_cpus(void)
+{
+ return 0;
+}
+#endif
+
+static void set_sample_period(void)
+{
+}
+#endif /* SOFTLOCKUP */
+
+/*
+ * Suspend the hard and soft lockup detector by parking the watchdog threads.
+ */
+int lockup_detector_suspend(void)
+{
+ int ret = 0;
+
+ get_online_cpus();
+ mutex_lock(&watchdog_proc_mutex);
+ /*
+ * Multiple suspend requests can be active in parallel (counted by
+ * the 'watchdog_suspended' variable). If the watchdog threads are
+ * running, the first caller takes care that they will be parked.
+ * The state of 'watchdog_running' cannot change while a suspend
+ * request is active (see related code in 'proc' handlers).
+ */
+ if (watchdog_running && !watchdog_suspended)
+ ret = watchdog_park_threads();
+
+ if (ret == 0)
+ watchdog_suspended++;
+ else {
+ watchdog_disable_all_cpus();
+ pr_err("Failed to suspend lockup detectors, disabled\n");
+ watchdog_enabled = 0;
+ }
+
+ watchdog_nmi_reconfigure();
+
+ mutex_unlock(&watchdog_proc_mutex);
+
+ return ret;
+}
+
+/*
+ * Resume the hard and soft lockup detector by unparking the watchdog threads.
+ */
+void lockup_detector_resume(void)
+{
+ mutex_lock(&watchdog_proc_mutex);
+
+ watchdog_suspended--;
+ /*
+ * The watchdog threads are unparked if they were previously running
+ * and if there is no more active suspend request.
+ */
+ if (watchdog_running && !watchdog_suspended)
+ watchdog_unpark_threads();
+
+ watchdog_nmi_reconfigure();
+
+ mutex_unlock(&watchdog_proc_mutex);
+ put_online_cpus();
+}
+
+#ifdef CONFIG_SYSCTL
/*
* Update the run state of the lockup detectors.
@@ -625,6 +735,8 @@ static int proc_watchdog_update(void)
else
watchdog_disable_all_cpus();
+ watchdog_nmi_reconfigure();
+
return err;
}
@@ -810,10 +922,11 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
* a temporary cpumask, so we are likely not in a
* position to do much else to make things better.
*/
- if (smpboot_update_cpumask_percpu_thread(
- &watchdog_threads, &watchdog_cpumask) != 0)
+ if (watchdog_update_cpus() != 0)
pr_err("cpumask update failed\n");
}
+
+ watchdog_nmi_reconfigure();
}
out:
mutex_unlock(&watchdog_proc_mutex);
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 54a427d1f344..295a0d84934c 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -22,41 +22,9 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-unsigned int __read_mostly hardlockup_panic =
- CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
- if (!strncmp(str, "panic", 5))
- hardlockup_panic = 1;
- else if (!strncmp(str, "nopanic", 7))
- hardlockup_panic = 0;
- else if (!strncmp(str, "0", 1))
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
- else if (!strncmp(str, "1", 1))
- watchdog_enabled |= NMI_WATCHDOG_ENABLED;
- return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-void touch_nmi_watchdog(void)
+void arch_touch_nmi_watchdog(void)
{
/*
* Using __raw here because some code paths have
@@ -66,9 +34,8 @@ void touch_nmi_watchdog(void)
* going off.
*/
raw_cpu_write(watchdog_nmi_touch, true);
- touch_softlockup_watchdog();
}
-EXPORT_SYMBOL(touch_nmi_watchdog);
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
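With the rename, the generic touch_nmi_watchdog() becomes a wrapper that combines the arch hook with the softlockup side; a sketch of what this series defines in include/linux/nmi.h, shown here for orientation only:

static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();	/* hard lockup side, if implemented */
	touch_softlockup_watchdog();	/* soft lockup side */
}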
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e20fc079bebd..98fe715522e8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -778,34 +778,45 @@ config DEBUG_SHIRQ
menu "Debug Lockups and Hangs"
config LOCKUP_DETECTOR
- bool "Detect Hard and Soft Lockups"
+ bool
+
+config SOFTLOCKUP_DETECTOR
+ bool "Detect Soft Lockups"
depends on DEBUG_KERNEL && !S390
+ select LOCKUP_DETECTOR
help
Say Y here to enable the kernel to act as a watchdog to detect
- hard and soft lockups.
+ soft lockups.
Softlockups are bugs that cause the kernel to loop in kernel
mode for more than 20 seconds, without giving other tasks a
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config HARDLOCKUP_DETECTOR_PERF
+ bool
+ select SOFTLOCKUP_DETECTOR
+
+#
+# arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard
+# lockup detector rather than the perf based detector.
+#
+config HARDLOCKUP_DETECTOR
+ bool "Detect Hard Lockups"
+ depends on DEBUG_KERNEL && !S390
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select LOCKUP_DETECTOR
+ select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
+ select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
+ help
+ Say Y here to enable the kernel to act as a watchdog to detect
+ hard lockups.
+
Hardlockups are bugs that cause the CPU to loop in kernel mode
for more than 10 seconds, without letting other interrupts have a
chance to run. The current stack trace is displayed upon detection
and the system will stay locked up.
- The overhead should be minimal. A periodic hrtimer runs to
- generate interrupts and kick the watchdog task every 4 seconds.
- An NMI is generated every 10 seconds or so to check for hardlockups.
-
- The frequency of hrtimer and NMI events and the soft and hard lockup
- thresholds can be controlled through the sysctl watchdog_thresh.
-
-config HARDLOCKUP_DETECTOR
- def_bool y
- depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
-
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
@@ -826,7 +837,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
- depends on LOCKUP_DETECTOR
+ depends on SOFTLOCKUP_DETECTOR
help
Say Y here to enable the kernel to panic on "soft lockups",
which are bugs that cause the kernel to loop in kernel
@@ -843,7 +854,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
int
- depends on LOCKUP_DETECTOR
+ depends on SOFTLOCKUP_DETECTOR
range 0 1
default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -851,7 +862,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
- default LOCKUP_DETECTOR
+ default SOFTLOCKUP_DETECTOR
help
Say Y here to enable the kernel to detect "hung tasks",
which are bugs that cause the task to be stuck in
@@ -1212,6 +1223,34 @@ config STACKTRACE
It is also used by various kernel debugging features that require
stack trace generation.
+config WARN_ALL_UNSEEDED_RANDOM
+ bool "Warn for all uses of unseeded randomness"
+ default n
+ help
+ Some parts of the kernel contain bugs relating to their use of
+ cryptographically secure random numbers before it's actually possible
+ to generate those numbers securely. This setting ensures that these
+ flaws don't go unnoticed, by enabling a message, should this ever
+ occur. This will allow people with obscure setups to know when things
+ are going wrong, so that they might contact developers about fixing
+ it.
+
+ Unfortunately, on some models of some architectures getting
+ a fully seeded CRNG is extremely difficult, and so this can
+ result in dmesg getting spammed for a surprisingly long
+ time. This is really bad from a security perspective, and
+ so architecture maintainers really need to do what they can
+ to get the CRNG seeded sooner after the system is booted.
+ However, since users cannot do anything actionable to
+ address this, by default the kernel will issue only a single
+ warning for the first use of unseeded randomness.
+
+ Say Y here if you want to receive warnings for all uses of
+ unseeded randomness. This will be of use primarily for
+ those developers interested in improving the security of
+ Linux kernels running on their architecture (or
+ subarchitecture).
+
config DEBUG_KOBJECT
bool "kobject debugging"
depends on DEBUG_KERNEL
@@ -1785,6 +1824,17 @@ config TEST_FIRMWARE
If unsure, say N.
+config TEST_SYSCTL
+ tristate "sysctl test driver"
+ default n
+ depends on PROC_SYSCTL
+ help
+ This builds the "test_sysctl" module. This driver enables testing of
+ the proc sysctl interfaces available to drivers, safely and without
+ affecting production knobs which might alter system functionality.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
default n
@@ -1825,6 +1875,33 @@ config BUG_ON_DATA_CORRUPTION
If unsure, say N.
+config TEST_KMOD
+ tristate "kmod stress tester"
+ default n
+ depends on m
+ depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
+ depends on NETDEVICES && NET_CORE && INET # for TUN
+ select TEST_LKM
+ select XFS_FS
+ select TUN
+ select BTRFS_FS
+ help
+ Test the kernel's module loading mechanism: kmod. kmod implements
+ support to load modules using the Linux kernel's usermode helper.
+ This test provides a series of tests against kmod.
+
+ Although technically you could build test_kmod either as a module or
+ into the kernel, we disallow building it into the kernel: it stress
+ tests request_module(), which would very likely cause issues by
+ taking over the precious threads available for other module load
+ requests; ultimately this could be fatal.
+
+ To run tests run:
+
+ tools/testing/selftests/kmod/kmod.sh --help
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 5a008329324e..40c18372b301 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -60,6 +61,7 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
+obj-$(CONFIG_TEST_KMOD) += test_kmod.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index fd70c0e0e673..62ab629f51ca 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -153,8 +153,10 @@ static __init void test_atomic64(void)
long long v0 = 0xaaa31337c001d00dLL;
long long v1 = 0xdeadbeefdeafcafeLL;
long long v2 = 0xfaceabadf00df001LL;
+ long long v3 = 0x8000000000000000LL;
long long onestwos = 0x1111111122222222LL;
long long one = 1LL;
+ int r_int;
atomic64_t v = ATOMIC64_INIT(v0);
long long r = v0;
@@ -240,6 +242,11 @@ static __init void test_atomic64(void)
BUG_ON(!atomic64_inc_not_zero(&v));
r += one;
BUG_ON(v.counter != r);
+
+ /* Confirm the return value fits in an int, even if the value doesn't */
+ INIT(v3);
+ r_int = atomic64_inc_not_zero(&v);
+ BUG_ON(!r_int);
}
static __init int test_atomics_init(void)
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 4ff157159a0d..7d315fdb9f13 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -107,6 +107,15 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
bool should_fail(struct fault_attr *attr, ssize_t size)
{
+ if (in_task()) {
+ unsigned int fail_nth = READ_ONCE(current->fail_nth);
+
+ if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
+ goto fail;
+
+ return false;
+ }
+
/* No need to check any other properties if the probability is 0 */
if (attr->probability == 0)
return false;
@@ -134,6 +143,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
if (!fail_stacktrace(attr))
return false;
+fail:
fail_dump(attr);
if (atomic_read(&attr->times) != -1)
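The new per-task counter gives deterministic, systematic fault injection: a task arms current->fail_nth and exactly the Nth should_fail() call it makes fails, independent of the probabilistic attributes. An illustrative userspace sketch (the procfs knob is added by the companion patches in this series):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/thread-self/fail-nth", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "3", 1);	/* the 3rd injection site this task hits fails */
	close(fd);
	/* ... run the syscall under test here ... */
	return 0;
}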
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8ee7e5ec21be..3bf4a9984f4c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,6 +72,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
+/**
+ * This function is both preempt and irq safe. The former is due to explicit
+ * preemption disable. The latter is guaranteed by the fact that the slow path
+ * is explicitly protected by an irq-safe spinlock whereas the fast path uses
+ * this_cpu_add() which is irq-safe by definition. Hence there is no need to
+ * muck with irq state before calling this one.
+ */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
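A minimal usage sketch (counter name and batch size are illustrative): because of the guarantees described above, the caller needs no irq fiddling in either context.

static struct percpu_counter example_events;

static int example_init(void)
{
	int err = percpu_counter_init(&example_events, 0, GFP_KERNEL);

	if (err)
		return err;
	/* Per-CPU deltas are folded into the global count every 32 events. */
	percpu_counter_add_batch(&example_events, 1, 32);
	return 0;
}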
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 42466c167257..707ca5d677c6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -234,7 +234,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
INIT_LIST_HEAD(&tbl->walkers);
- get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+ tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
diff --git a/lib/string.c b/lib/string.c
index 1c1fc9187b05..ebbb99c775bd 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -978,3 +978,10 @@ char *strreplace(char *s, char old, char new)
return s;
}
EXPORT_SYMBOL(strreplace);
+
+void fortify_panic(const char *name)
+{
+ pr_emerg("detected buffer overflow in %s\n", name);
+ BUG();
+}
+EXPORT_SYMBOL(fortify_panic);
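fortify_panic() is the landing pad for CONFIG_FORTIFY_SOURCE: fortified string/memory helpers whose destination size is known at compile time call it when a run-time length would overflow. An illustrative kernel-side sketch (names are ours):

static void example_overflow(const void *src, size_t len)
{
	char dst[4];

	/* If len > sizeof(dst) at run time, the fortified memcpy()
	 * calls fortify_panic("memcpy") instead of corrupting memory.
	 */
	memcpy(dst, src, len);
}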
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
new file mode 100644
index 000000000000..6c1d678bcf8b
--- /dev/null
+++ b/lib/test_kmod.c
@@ -0,0 +1,1246 @@
+/*
+ * kmod stress test driver
+ *
+ * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or at your option any
+ * later version; or, when distributed separately from the Linux kernel or
+ * when incorporated into other software packages, subject to the following
+ * license:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of copyleft-next (version 0.3.1 or later) as published
+ * at http://copyleft-next.org/.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*
+ * This driver provides an interface to trigger and test the kernel's
+ * module loader through a series of configurations and a few triggers.
+ * To test this driver use the following script as root:
+ *
+ * tools/testing/selftests/kmod/kmod.sh --help
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/printk.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#define TEST_START_NUM_THREADS 50
+#define TEST_START_DRIVER "test_module"
+#define TEST_START_TEST_FS "xfs"
+#define TEST_START_TEST_CASE TEST_KMOD_DRIVER
+
+
+static bool force_init_test = false;
+module_param(force_init_test, bool_enable_only, 0644);
+MODULE_PARM_DESC(force_init_test,
+ "Force kicking a test immediately after driver loads");
+
+/*
+ * For device allocation / registration
+ */
+static DEFINE_MUTEX(reg_dev_mutex);
+static LIST_HEAD(reg_test_devs);
+
+/*
+ * num_test_devs actually represents the *next* ID, i.e. the ID of the
+ * next device we will allow to be created.
+ */
+static int num_test_devs;
+
+/**
+ * enum kmod_test_case - linker table test case
+ *
+ * If you add a test case, please be sure to review if you need to set
+ * @need_mod_put for your test case.
+ *
+ * @TEST_KMOD_DRIVER: stress tests request_module()
+ * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
+ */
+enum kmod_test_case {
+ __TEST_KMOD_INVALID = 0,
+
+ TEST_KMOD_DRIVER,
+ TEST_KMOD_FS_TYPE,
+
+ __TEST_KMOD_MAX,
+};
+
+struct test_config {
+ char *test_driver;
+ char *test_fs;
+ unsigned int num_threads;
+ enum kmod_test_case test_case;
+ int test_result;
+};
+
+struct kmod_test_device;
+
+/**
+ * kmod_test_device_info - thread info
+ *
+ * @ret_sync: return value if request_module() is used, sync request for
+ * @TEST_KMOD_DRIVER
+ * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @thread_idx: thread ID
+ * @test_dev: test device test is being performed under
+ * @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
+ * (module_put(fs_sync->owner)) when done, otherwise you will not be able
+ * to unload the respective modules and re-test. We use this to keep
+ * accounting of when we need this and to help out in case we need to
+ * error out and deal with module_put() on error.
+ */
+struct kmod_test_device_info {
+ int ret_sync;
+ struct file_system_type *fs_sync;
+ struct task_struct *task_sync;
+ unsigned int thread_idx;
+ struct kmod_test_device *test_dev;
+ bool need_mod_put;
+};
+
+/**
+ * kmod_test_device - test device to help test kmod
+ *
+ * @dev_idx: unique ID for test device
+ * @config: configuration for the test
+ * @misc_dev: we use a misc device under the hood
+ * @dev: pointer to misc_dev's own struct device
+ * @config_mutex: protects configuration of test
+ * @trigger_mutex: the test trigger can only be fired once at a time
+ * @thread_lock: protects @done count, and the @info per each thread
+ * @done: number of threads which have completed or failed
+ * @test_is_oom: when we run out of memory, use this to halt moving forward
+ * @kthreads_done: completion used to signal when all work is done
+ * @list: needed to be part of the reg_test_devs
+ * @info: array of info for each thread
+ */
+struct kmod_test_device {
+ int dev_idx;
+ struct test_config config;
+ struct miscdevice misc_dev;
+ struct device *dev;
+ struct mutex config_mutex;
+ struct mutex trigger_mutex;
+ struct mutex thread_mutex;
+
+ unsigned int done;
+
+ bool test_is_oom;
+ struct completion kthreads_done;
+ struct list_head list;
+
+ struct kmod_test_device_info *info;
+};
+
+static const char *test_case_str(enum kmod_test_case test_case)
+{
+ switch (test_case) {
+ case TEST_KMOD_DRIVER:
+ return "TEST_KMOD_DRIVER";
+ case TEST_KMOD_FS_TYPE:
+ return "TEST_KMOD_FS_TYPE";
+ default:
+ return "invalid";
+ }
+}
+
+static struct miscdevice *dev_to_misc_dev(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
+{
+ return container_of(misc_dev, struct kmod_test_device, misc_dev);
+}
+
+static struct kmod_test_device *dev_to_test_dev(struct device *dev)
+{
+ struct miscdevice *misc_dev;
+
+ misc_dev = dev_to_misc_dev(dev);
+
+ return misc_dev_to_test_dev(misc_dev);
+}
+
+/* Must run with thread_mutex held */
+static void kmod_test_done_check(struct kmod_test_device *test_dev,
+ unsigned int idx)
+{
+ struct test_config *config = &test_dev->config;
+
+ test_dev->done++;
+ dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);
+
+ if (test_dev->done == config->num_threads) {
+ dev_info(test_dev->dev, "Done: %u threads have all run now\n",
+ test_dev->done);
+ dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
+ complete(&test_dev->kthreads_done);
+ }
+}
+
+static void test_kmod_put_module(struct kmod_test_device_info *info)
+{
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+
+ if (!info->need_mod_put)
+ return;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ break;
+ case TEST_KMOD_FS_TYPE:
+ if (info->fs_sync && info->fs_sync->owner)
+ module_put(info->fs_sync->owner);
+ break;
+ default:
+ BUG();
+ }
+
+ info->need_mod_put = true;
+}
+
+static int run_request(void *data)
+{
+ struct kmod_test_device_info *info = data;
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ info->ret_sync = request_module("%s", config->test_driver);
+ break;
+ case TEST_KMOD_FS_TYPE:
+ info->fs_sync = get_fs_type(config->test_fs);
+ info->need_mod_put = true;
+ break;
+ default:
+ /* __trigger_config_run() already checked for test sanity */
+ BUG();
+ return -EINVAL;
+ }
+
+ dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);
+
+ test_kmod_put_module(info);
+
+ mutex_lock(&test_dev->thread_mutex);
+ info->task_sync = NULL;
+ kmod_test_done_check(test_dev, info->thread_idx);
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return 0;
+}
+
+static int tally_work_test(struct kmod_test_device_info *info)
+{
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+ int err_ret = 0;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ /*
+ * Only capture errors; if one is found, that's
+ * enough for now.
+ */
+ if (info->ret_sync != 0)
+ err_ret = info->ret_sync;
+ dev_info(test_dev->dev,
+ "Sync thread %d return status: %d\n",
+ info->thread_idx, info->ret_sync);
+ break;
+ case TEST_KMOD_FS_TYPE:
+ /* For now we make this simple */
+ if (!info->fs_sync)
+ err_ret = -EINVAL;
+ dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
+ info->thread_idx, info->fs_sync ? config->test_fs :
+ "NULL");
+ break;
+ default:
+ BUG();
+ }
+
+ return err_ret;
+}
+
+/*
+ * XXX: add result option to display if all errors did not match.
+ * For now we just keep any error code if one was found.
+ *
+ * If this ran it means *all* tasks were created fine and we
+ * are now just collecting results.
+ *
+ * Only propagate errors, do not override with a subsequent success case.
+ */
+static void tally_up_work(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ struct kmod_test_device_info *info;
+ unsigned int idx;
+ int err_ret = 0;
+ int ret = 0;
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ dev_info(test_dev->dev, "Results:\n");
+
+ for (idx = 0; idx < config->num_threads; idx++) {
+ info = &test_dev->info[idx];
+ ret = tally_work_test(info);
+ if (ret)
+ err_ret = ret;
+ }
+
+ /*
+ * Note: request_module() returns 256 for a module not found even
+ * though modprobe itself returns 1.
+ */
+ config->test_result = err_ret;
+
+ mutex_unlock(&test_dev->thread_mutex);
+}
+
+static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
+{
+ struct kmod_test_device_info *info = &test_dev->info[idx];
+ int fail_ret = -ENOMEM;
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ info->thread_idx = idx;
+ info->test_dev = test_dev;
+ info->task_sync = kthread_run(run_request, info, "%s-%u",
+ KBUILD_MODNAME, idx);
+
+ if (!info->task_sync || IS_ERR(info->task_sync)) {
+ test_dev->test_is_oom = true;
+ dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
+ info->task_sync = NULL;
+ goto err_out;
+ } else
+ dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);
+
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return 0;
+
+err_out:
+ info->ret_sync = fail_ret;
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return fail_ret;
+}
+
+static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ struct kmod_test_device_info *info;
+ unsigned int i;
+
+ dev_info(test_dev->dev, "Ending request_module() tests\n");
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ for (i = 0; i < config->num_threads; i++) {
+ info = &test_dev->info[i];
+ if (info->task_sync && !IS_ERR(info->task_sync)) {
+ dev_info(test_dev->dev,
+ "Stopping still-running thread %i\n", i);
+ kthread_stop(info->task_sync);
+ }
+
+ /*
+ * info->task_sync is well protected, it can only be
+ * NULL or a pointer to a struct. If it's NULL we either
+ * never ran, or we did and we completed the work. Completed
+ * tasks *always* put the module for us. This is a sanity
+ * check -- just in case.
+ */
+ if (info->task_sync && info->need_mod_put)
+ test_kmod_put_module(info);
+ }
+
+ mutex_unlock(&test_dev->thread_mutex);
+}
+
+/*
+ * Only wait if we did not run into any errors during all of our thread
+ * set up. If we ran into any issues we stop threads and just bail out with
+ * an error to the trigger. This also means we don't need any tally work
+ * for any threads which fail.
+ */
+static int try_requests(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ unsigned int idx;
+ int ret;
+ bool any_error = false;
+
+ for (idx = 0; idx < config->num_threads; idx++) {
+ if (test_dev->test_is_oom) {
+ any_error = true;
+ break;
+ }
+
+ ret = try_one_request(test_dev, idx);
+ if (ret) {
+ any_error = true;
+ break;
+ }
+ }
+
+ if (!any_error) {
+ test_dev->test_is_oom = false;
+ dev_info(test_dev->dev,
+ "No errors were found while initializing threads\n");
+ wait_for_completion(&test_dev->kthreads_done);
+ tally_up_work(test_dev);
+ } else {
+ test_dev->test_is_oom = true;
+ dev_info(test_dev->dev,
+ "At least one thread failed to start, stop all work\n");
+ test_dev_kmod_stop_tests(test_dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int run_test_driver(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ dev_info(test_dev->dev, "Test case: %s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+ dev_info(test_dev->dev, "Test driver to load: %s\n",
+ config->test_driver);
+ dev_info(test_dev->dev, "Number of threads to run: %u\n",
+ config->num_threads);
+ dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
+ config->num_threads - 1);
+
+ return try_requests(test_dev);
+}
+
+static int run_test_fs_type(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ dev_info(test_dev->dev, "Test case: %s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+ dev_info(test_dev->dev, "Test filesystem to load: %s\n",
+ config->test_fs);
+ dev_info(test_dev->dev, "Number of threads to run: %u\n",
+ config->num_threads);
+ dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
+ config->num_threads - 1);
+
+ return try_requests(test_dev);
+}
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int len = 0;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ len += snprintf(buf, PAGE_SIZE,
+ "Custom trigger configuration for: %s\n",
+ dev_name(dev));
+
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "Number of threads:\t%u\n",
+ config->num_threads);
+
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "Test_case:\t%s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+
+ if (config->test_driver)
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "driver:\t%s\n",
+ config->test_driver);
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "driver:\tEMTPY\n");
+
+ if (config->test_fs)
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "fs:\t%s\n",
+ config->test_fs);
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "fs:\tEMTPY\n");
+
+ mutex_unlock(&test_dev->config_mutex);
+
+ return len;
+}
+static DEVICE_ATTR_RO(config);
+
+/*
+ * This ensures we don't allow kicking threads through if our configuration
+ * is faulty.
+ */
+static int __trigger_config_run(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ test_dev->done = 0;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ return run_test_driver(test_dev);
+ case TEST_KMOD_FS_TYPE:
+ return run_test_fs_type(test_dev);
+ default:
+ dev_warn(test_dev->dev,
+ "Invalid test case requested: %u\n",
+ config->test_case);
+ return -EINVAL;
+ }
+}
+
+static int trigger_config_run(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ int ret;
+
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ ret = __trigger_config_run(test_dev);
+ if (ret < 0)
+ goto out;
+ dev_info(test_dev->dev, "General test result: %d\n",
+ config->test_result);
+
+ /*
+ * We must return 0 after a trigger event unless something went
+ * wrong with the setup of the test. If the test setup went fine
+ * then userspace must just check the result of config->test_result.
+ * One issue with relying on the return from a call in the kernel
+ * is that if the kernel returns a positive value, using this trigger
+ * will not return the value to userspace; it would be lost.
+ *
+ * By not relying on capturing the return value of tests we are using
+ * through the trigger, it also allows us to run tests with set -e and
+ * only fail when something went wrong with the driver upon trigger
+ * requests.
+ */
+ ret = 0;
+
+out:
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ return ret;
+}
+
+static ssize_t
+trigger_config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ int ret;
+
+ if (test_dev->test_is_oom)
+ return -ENOMEM;
+
+ /* For all intents and purposes we don't care what userspace
+ * sent us on this trigger; we care only that we were triggered.
+ * We treat the return value only for capturing issues with
+ * the test setup. At this point all the test variables should
+ * have been allocated so typically this should never fail.
+ */
+ ret = trigger_config_run(test_dev);
+ if (unlikely(ret < 0))
+ goto out;
+
+ /*
+ * Note: any return > 0 will be treated as success
+ * and the error value will not be available to userspace.
+ * Do not rely on trying to send a test's return value to
+ * userspace, as positive return errors will be lost.
+ */
+ if (WARN_ON(ret > 0))
+ return -EINVAL;
+
+ ret = count;
+out:
+ return ret;
+}
+static DEVICE_ATTR_WO(trigger_config);
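Putting the attributes together, an illustrative userspace sequence (the sysfs paths assume the first registered device is named test_kmod0, per the misc-device naming above): configure a value, then write anything to trigger_config and read test_result afterwards.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd;

	fd = open("/sys/devices/virtual/misc/test_kmod0/config_test_fs",
		  O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "xfs", 3);	/* filesystem to request */
	close(fd);

	fd = open("/sys/devices/virtual/misc/test_kmod0/trigger_config",
		  O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* the value is ignored; the write triggers */
	close(fd);
	return 0;
}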
+
+/*
+ * XXX: move to kstrncpy() once merged.
+ *
+ * Users should use kfree_const() when freeing these.
+ */
+static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
+{
+ *dst = kstrndup(name, count, gfp);
+ if (!*dst)
+ return -ENOSPC;
+ return count;
+}
+
+static int config_copy_test_driver_name(struct test_config *config,
+ const char *name,
+ size_t count)
+{
+ return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
+}
+
+
+static int config_copy_test_fs(struct test_config *config, const char *name,
+ size_t count)
+{
+ return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
+}
+
+static void __kmod_config_free(struct test_config *config)
+{
+ if (!config)
+ return;
+
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+}
+
+static void kmod_config_free(struct kmod_test_device *test_dev)
+{
+ struct test_config *config;
+
+ if (!test_dev)
+ return;
+
+ config = &test_dev->config;
+
+ mutex_lock(&test_dev->config_mutex);
+ __kmod_config_free(config);
+ mutex_unlock(&test_dev->config_mutex);
+}
+
+static ssize_t config_test_driver_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int copied;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+
+ copied = config_copy_test_driver_name(config, buf, count);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return copied;
+}
+
+/*
+ * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
+ */
+static ssize_t config_test_show_str(struct mutex *config_mutex,
+ char *dst,
+ char *src)
+{
+ int len;
+
+ mutex_lock(config_mutex);
+ len = snprintf(dst, PAGE_SIZE, "%s\n", src);
+ mutex_unlock(config_mutex);
+
+ return len;
+}
+
+static ssize_t config_test_driver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return config_test_show_str(&test_dev->config_mutex, buf,
+ config->test_driver);
+}
+static DEVICE_ATTR(config_test_driver, 0644, config_test_driver_show,
+ config_test_driver_store);
+
+static ssize_t config_test_fs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int copied;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+
+ copied = config_copy_test_fs(config, buf, count);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return copied;
+}
+
+static ssize_t config_test_fs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return config_test_show_str(&test_dev->config_mutex, buf,
+ config->test_fs);
+}
+static DEVICE_ATTR(config_test_fs, 0644, config_test_fs_show,
+ config_test_fs_store);
+
+static int trigger_config_run_type(struct kmod_test_device *test_dev,
+ enum kmod_test_case test_case,
+ const char *test_str)
+{
+ int copied = 0;
+ struct test_config *config = &test_dev->config;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ switch (test_case) {
+ case TEST_KMOD_DRIVER:
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+ copied = config_copy_test_driver_name(config, test_str,
+ strlen(test_str));
+ break;
+ case TEST_KMOD_FS_TYPE:
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+ copied = config_copy_test_fs(config, test_str,
+ strlen(test_str));
+ break;
+ default:
+ mutex_unlock(&test_dev->config_mutex);
+ return -EINVAL;
+ }
+
+ config->test_case = test_case;
+
+ mutex_unlock(&test_dev->config_mutex);
+
+ if (copied <= 0 || copied != strlen(test_str)) {
+ test_dev->test_is_oom = true;
+ return -ENOMEM;
+ }
+
+ test_dev->test_is_oom = false;
+
+ return trigger_config_run(test_dev);
+}
+
+static void free_test_dev_info(struct kmod_test_device *test_dev)
+{
+ vfree(test_dev->info);
+ test_dev->info = NULL;
+}
+
+static int kmod_config_sync_info(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ free_test_dev_info(test_dev);
+ test_dev->info = vzalloc(config->num_threads *
+ sizeof(struct kmod_test_device_info));
+ if (!test_dev->info) {
+ dev_err(test_dev->dev, "Cannot alloc test_dev info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Old kernels may not have this; this fallback makes it possible to
+ * port this code for testing on older kernels.
+ */
+#ifdef get_kmod_umh_limit
+static unsigned int kmod_init_test_thread_limit(void)
+{
+ return get_kmod_umh_limit();
+}
+#else
+static unsigned int kmod_init_test_thread_limit(void)
+{
+ return TEST_START_NUM_THREADS;
+}
+#endif
+
+static int __kmod_config_init(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ int ret = -ENOMEM, copied;
+
+ __kmod_config_free(config);
+
+ copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
+ strlen(TEST_START_DRIVER));
+ if (copied != strlen(TEST_START_DRIVER))
+ goto err_out;
+
+ copied = config_copy_test_fs(config, TEST_START_TEST_FS,
+ strlen(TEST_START_TEST_FS));
+ if (copied != strlen(TEST_START_TEST_FS))
+ goto err_out;
+
+ config->num_threads = kmod_init_test_thread_limit();
+ config->test_result = 0;
+ config->test_case = TEST_START_TEST_CASE;
+
+ ret = kmod_config_sync_info(test_dev);
+ if (ret)
+ goto err_out;
+
+ test_dev->test_is_oom = false;
+
+ return 0;
+
+err_out:
+ test_dev->test_is_oom = true;
+ WARN_ON(test_dev->test_is_oom);
+
+ __kmod_config_free(config);
+
+ return ret;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ int ret;
+
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ ret = __kmod_config_init(test_dev);
+	if (ret < 0) {
+		dev_err(dev, "could not alloc settings for config trigger: %d\n",
+			ret);
+		goto out;
+	}
+
+ dev_info(dev, "reset\n");
+ ret = count;
+
+out:
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(reset);
+
+static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ unsigned int *config,
+ int (*test_sync)(struct kmod_test_device *test_dev))
+{
+ int ret;
+ long new;
+ unsigned int old_val;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+	if (new < 0 || new > UINT_MAX)
+		return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ old_val = *config;
+	*config = new;
+
+ ret = test_sync(test_dev);
+ if (ret) {
+		*config = old_val;
+
+ ret = test_sync(test_dev);
+ WARN_ON(ret);
+
+ mutex_unlock(&test_dev->config_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&test_dev->config_mutex);
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ unsigned int *config,
+ unsigned int min,
+ unsigned int max)
+{
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ if (new < min || new > max || new > UINT_MAX)
+ return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+ *config = new;
+ mutex_unlock(&test_dev->config_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_int(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ int *config)
+{
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ if (new > INT_MAX || new < INT_MIN)
+ return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+ *config = new;
+ mutex_unlock(&test_dev->config_mutex);
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
+ char *buf,
+ int config)
+{
+ int val;
+
+ mutex_lock(&test_dev->config_mutex);
+ val = config;
+ mutex_unlock(&test_dev->config_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
+ char *buf,
+ unsigned int config)
+{
+ unsigned int val;
+
+ mutex_lock(&test_dev->config_mutex);
+ val = config;
+ mutex_unlock(&test_dev->config_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t test_result_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_int(test_dev, buf, count,
+ &config->test_result);
+}
+
+static ssize_t config_num_threads_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_uint_sync(test_dev, buf, count,
+ &config->num_threads,
+ kmod_config_sync_info);
+}
+
+static ssize_t config_num_threads_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+	return test_dev_config_show_uint(test_dev, buf, config->num_threads);
+}
+static DEVICE_ATTR(config_num_threads, 0644, config_num_threads_show,
+ config_num_threads_store);
+
+static ssize_t config_test_case_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_uint_range(test_dev, buf, count,
+ &config->test_case,
+ __TEST_KMOD_INVALID + 1,
+ __TEST_KMOD_MAX - 1);
+}
+
+static ssize_t config_test_case_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_uint(test_dev, buf, config->test_case);
+}
+static DEVICE_ATTR(config_test_case, 0644, config_test_case_show,
+ config_test_case_store);
+
+static ssize_t test_result_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_int(test_dev, buf, config->test_result);
+}
+static DEVICE_ATTR(test_result, 0644, test_result_show, test_result_store);
+
+#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+ TEST_KMOD_DEV_ATTR(trigger_config),
+ TEST_KMOD_DEV_ATTR(config),
+ TEST_KMOD_DEV_ATTR(reset),
+
+ TEST_KMOD_DEV_ATTR(config_test_driver),
+ TEST_KMOD_DEV_ATTR(config_test_fs),
+ TEST_KMOD_DEV_ATTR(config_num_threads),
+ TEST_KMOD_DEV_ATTR(config_test_case),
+ TEST_KMOD_DEV_ATTR(test_result),
+
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static int kmod_config_init(struct kmod_test_device *test_dev)
+{
+ int ret;
+
+ mutex_lock(&test_dev->config_mutex);
+ ret = __kmod_config_init(test_dev);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return ret;
+}
+
+static struct kmod_test_device *alloc_test_dev_kmod(int idx)
+{
+ int ret;
+ struct kmod_test_device *test_dev;
+ struct miscdevice *misc_dev;
+
+ test_dev = vzalloc(sizeof(struct kmod_test_device));
+ if (!test_dev) {
+ pr_err("Cannot alloc test_dev\n");
+ goto err_out;
+ }
+
+ mutex_init(&test_dev->config_mutex);
+ mutex_init(&test_dev->trigger_mutex);
+ mutex_init(&test_dev->thread_mutex);
+
+ init_completion(&test_dev->kthreads_done);
+
+ ret = kmod_config_init(test_dev);
+ if (ret < 0) {
+ pr_err("Cannot alloc kmod_config_init()\n");
+ goto err_out_free;
+ }
+
+ test_dev->dev_idx = idx;
+ misc_dev = &test_dev->misc_dev;
+
+ misc_dev->minor = MISC_DYNAMIC_MINOR;
+ misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
+ if (!misc_dev->name) {
+ pr_err("Cannot alloc misc_dev->name\n");
+ goto err_out_free_config;
+ }
+ misc_dev->groups = test_dev_groups;
+
+ return test_dev;
+
+err_out_free_config:
+ free_test_dev_info(test_dev);
+ kmod_config_free(test_dev);
+err_out_free:
+ vfree(test_dev);
+ test_dev = NULL;
+err_out:
+ return NULL;
+}
+
+static void free_test_dev_kmod(struct kmod_test_device *test_dev)
+{
+ if (test_dev) {
+ kfree_const(test_dev->misc_dev.name);
+ test_dev->misc_dev.name = NULL;
+ free_test_dev_info(test_dev);
+ kmod_config_free(test_dev);
+ vfree(test_dev);
+ test_dev = NULL;
+ }
+}
+
+static struct kmod_test_device *register_test_dev_kmod(void)
+{
+ struct kmod_test_device *test_dev = NULL;
+ int ret;
+
+	mutex_lock(&reg_dev_mutex);
+
+ /* int should suffice for number of devices, test for wrap */
+	if (unlikely(num_test_devs + 1 < 0)) {
+ pr_err("reached limit of number of test devices\n");
+ goto out;
+ }
+
+ test_dev = alloc_test_dev_kmod(num_test_devs);
+ if (!test_dev)
+ goto out;
+
+ ret = misc_register(&test_dev->misc_dev);
+ if (ret) {
+ pr_err("could not register misc device: %d\n", ret);
+ free_test_dev_kmod(test_dev);
+ goto out;
+ }
+
+ test_dev->dev = test_dev->misc_dev.this_device;
+ list_add_tail(&test_dev->list, &reg_test_devs);
+ dev_info(test_dev->dev, "interface ready\n");
+
+ num_test_devs++;
+
+out:
+ mutex_unlock(&reg_dev_mutex);
+
+ return test_dev;
+}
+
+static int __init test_kmod_init(void)
+{
+ struct kmod_test_device *test_dev;
+ int ret;
+
+ test_dev = register_test_dev_kmod();
+ if (!test_dev) {
+ pr_err("Cannot add first test kmod device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * With some work we might be able to gracefully enable
+ * testing with this driver built-in, for now this seems
+ * rather risky. For those willing to try, have at it
+ * and enable the below. Good luck! If that works, try
+ * lowering the init level for more fun.
+ */
+ if (force_init_test) {
+ ret = trigger_config_run_type(test_dev,
+ TEST_KMOD_DRIVER, "tun");
+ if (WARN_ON(ret))
+ return ret;
+ ret = trigger_config_run_type(test_dev,
+ TEST_KMOD_FS_TYPE, "btrfs");
+ if (WARN_ON(ret))
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(test_kmod_init);
+
+static void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
+{
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ test_dev_kmod_stop_tests(test_dev);
+
+ dev_info(test_dev->dev, "removing interface\n");
+ misc_deregister(&test_dev->misc_dev);
+
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ free_test_dev_kmod(test_dev);
+}
+
+static void __exit test_kmod_exit(void)
+{
+ struct kmod_test_device *test_dev, *tmp;
+
+ mutex_lock(&reg_dev_mutex);
+ list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
+ list_del(&test_dev->list);
+ unregister_test_dev_kmod(test_dev);
+ }
+ mutex_unlock(&reg_dev_mutex);
+}
+module_exit(test_kmod_exit);
+
+MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
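The sysfs knobs added by lib/test_kmod.c are meant to be driven from
userspace. A minimal userspace sketch, assuming the first device
registers as test_kmod0 under /sys/devices/virtual/misc/ and that
TEST_KMOD_DRIVER maps to test case 1 (path and values are assumptions
for illustration, not taken from this patch):

	/* Hypothetical userspace driver for the test_kmod sysfs knobs. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define SYSFS_DIR "/sys/devices/virtual/misc/test_kmod0/"

	static int write_knob(const char *name, const char *val)
	{
		char path[256];
		int fd, ok;

		snprintf(path, sizeof(path), SYSFS_DIR "%s", name);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ok = write(fd, val, strlen(val)) == (ssize_t)strlen(val);
		close(fd);
		return ok ? 0 : -1;
	}

	int main(void)
	{
		if (write_knob("config_test_case", "1"))	/* assumed: TEST_KMOD_DRIVER */
			return 1;
		if (write_knob("config_test_driver", "tun"))
			return 1;
		if (write_knob("config_num_threads", "8"))
			return 1;
		return write_knob("trigger_config", "1") ? 1 : 0;
	}

The kmod selftest script added alongside this driver exercises the same
interface from shell.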
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
new file mode 100644
index 000000000000..3dd801c1c85b
--- /dev/null
+++ b/lib/test_sysctl.c
@@ -0,0 +1,148 @@
+/*
+ * proc sysctl test driver
+ *
+ * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or at your option any
+ * later version; or, when distributed separately from the Linux kernel or
+ * when incorporated into other software packages, subject to the following
+ * license:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of copyleft-next (version 0.3.1 or later) as published
+ * at http://copyleft-next.org/.
+ */
+
+/*
+ * This module provides an interface to the proc sysctl interfaces. This
+ * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
+ * system unless explicitly requested by name. You can also build this driver
+ * into your kernel.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+
+static int i_zero;
+static int i_one_hundred = 100;
+
+struct test_sysctl_data {
+ int int_0001;
+ int int_0002;
+ int int_0003[4];
+
+ unsigned int uint_0001;
+
+ char string_0001[65];
+};
+
+static struct test_sysctl_data test_data = {
+ .int_0001 = 60,
+ .int_0002 = 1,
+
+ .int_0003[0] = 0,
+ .int_0003[1] = 1,
+ .int_0003[2] = 2,
+ .int_0003[3] = 3,
+
+ .uint_0001 = 314,
+
+ .string_0001 = "(none)",
+};
+
+/* These are all under /proc/sys/debug/test_sysctl/ */
+static struct ctl_table test_table[] = {
+ {
+ .procname = "int_0001",
+ .data = &test_data.int_0001,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ },
+ {
+ .procname = "int_0002",
+ .data = &test_data.int_0002,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "int_0003",
+ .data = &test_data.int_0003,
+ .maxlen = sizeof(test_data.int_0003),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "uint_0001",
+ .data = &test_data.uint_0001,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "string_0001",
+ .data = &test_data.string_0001,
+ .maxlen = sizeof(test_data.string_0001),
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+ { }
+};
+
+static struct ctl_table test_sysctl_table[] = {
+ {
+ .procname = "test_sysctl",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = test_table,
+ },
+ { }
+};
+
+static struct ctl_table test_sysctl_root_table[] = {
+ {
+ .procname = "debug",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = test_sysctl_table,
+ },
+ { }
+};
+
+static struct ctl_table_header *test_sysctl_header;
+
+static int __init test_sysctl_init(void)
+{
+ test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
+ if (!test_sysctl_header)
+ return -ENOMEM;
+ return 0;
+}
+late_initcall(test_sysctl_init);
+
+static void __exit test_sysctl_exit(void)
+{
+ if (test_sysctl_header)
+ unregister_sysctl_table(test_sysctl_header);
+}
+
+module_exit(test_sysctl_exit);
+
+MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
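The table above lands under /proc/sys/debug/test_sysctl/. A hedged
userspace sketch exercising int_0001, whose proc_dointvec_minmax
handler clamps writes to [0, 100] via i_zero and i_one_hundred:

	/* Read and update one of the test sysctl entries. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/proc/sys/debug/test_sysctl/int_0001";
		FILE *f;
		int val;

		f = fopen(path, "r");
		if (!f)
			return 1;
		if (fscanf(f, "%d", &val) == 1)
			printf("int_0001 = %d\n", val);	/* 60 by default */
		fclose(f);

		f = fopen(path, "w");
		if (!f)
			return 1;
		fprintf(f, "%d\n", 99);	/* writing 101 would be rejected */
		fclose(f);
		return 0;
	}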
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1e516520433d..bc48ee783dd9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1384,7 +1384,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
page = __alloc_pages_node(nid,
htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
- __GFP_REPEAT|__GFP_NOWARN,
+ __GFP_RETRY_MAYFAIL|__GFP_NOWARN,
huge_page_order(h));
if (page) {
prep_new_huge_page(h, page, nid);
@@ -1525,7 +1525,7 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
{
int order = huge_page_order(h);
- gfp_mask |= __GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
+ gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
diff --git a/mm/internal.h b/mm/internal.h
index 0e4f558412fb..24d88f084705 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -23,7 +23,7 @@
* hints such as HIGHMEM usage.
*/
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
- __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
+ __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
__GFP_ATOMIC)
diff --git a/mm/memory.c b/mm/memory.c
index cbb57194687e..0e517be91a89 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3591,7 +3591,7 @@ out:
return 0;
}
-static int create_huge_pmd(struct vm_fault *vmf)
+static inline int create_huge_pmd(struct vm_fault *vmf)
{
if (vma_is_anonymous(vmf->vma))
return do_huge_pmd_anonymous_page(vmf);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7d8e56214ac0..d911fa5cb2a7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1078,7 +1078,8 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
/*
* if !vma, alloc_page_vma() will use task or system default policy
*/
- return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
+ vma, address);
}
#else
diff --git a/mm/mmap.c b/mm/mmap.c
index 7fa6759322d1..f19efcf75418 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2231,7 +2231,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
- if (address >= TASK_SIZE)
+ if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
address += PAGE_SIZE;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b60cc7ddac2..96e93b214d31 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -601,7 +601,7 @@ static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
struct wb_domain *cgdom;
- __inc_wb_stat(wb, WB_WRITTEN);
+ inc_wb_stat(wb, WB_WRITTEN);
wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
wb->bdi->max_prop_frac);
@@ -2435,8 +2435,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
__inc_lruvec_page_state(page, NR_FILE_DIRTY);
__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
__inc_node_page_state(page, NR_DIRTIED);
- __inc_wb_stat(wb, WB_RECLAIMABLE);
- __inc_wb_stat(wb, WB_DIRTIED);
+ inc_wb_stat(wb, WB_RECLAIMABLE);
+ inc_wb_stat(wb, WB_DIRTIED);
task_io_account_write(PAGE_SIZE);
current->nr_dirtied++;
this_cpu_inc(bdp_ratelimits);
@@ -2741,7 +2741,7 @@ int test_clear_page_writeback(struct page *page)
if (bdi_cap_account_writeback(bdi)) {
struct bdi_writeback *wb = inode_to_wb(inode);
- __dec_wb_stat(wb, WB_WRITEBACK);
+ dec_wb_stat(wb, WB_WRITEBACK);
__wb_writeout_inc(wb);
}
}
@@ -2786,7 +2786,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
page_index(page),
PAGECACHE_TAG_WRITEBACK);
if (bdi_cap_account_writeback(bdi))
- __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
+ inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
/*
* We can come through here when swapping anonymous
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 64b7d82a9b1a..6d30e914afb6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3284,6 +3284,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
/* The OOM killer will not help higher order allocs */
if (order > PAGE_ALLOC_COSTLY_ORDER)
goto out;
+ /*
+ * We have already exhausted all our reclaim opportunities without any
+ * success so it is time to admit defeat. We will skip the OOM killer
+ * because it is very likely that the caller has a more reasonable
+ * fallback than shooting a random task.
+ */
+ if (gfp_mask & __GFP_RETRY_MAYFAIL)
+ goto out;
/* The OOM killer does not needlessly kill tasks for lowmem */
if (ac->high_zoneidx < ZONE_NORMAL)
goto out;
@@ -3413,7 +3421,7 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
}
/*
- * !costly requests are much more important than __GFP_REPEAT
+ * !costly requests are much more important than __GFP_RETRY_MAYFAIL
* costly ones because they are de facto nofail and invoke OOM
* killer to move on while costly can fail and users are ready
* to cope with that. 1/4 retries is rather arbitrary but we
@@ -3920,9 +3928,9 @@ retry:
/*
* Do not retry costly high order allocations unless they are
- * __GFP_REPEAT
+ * __GFP_RETRY_MAYFAIL
*/
- if (costly_order && !(gfp_mask & __GFP_REPEAT))
+ if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
goto nopage;
if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
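The new early exit in __alloc_pages_may_oom() is the core of the
semantic change: a costly __GFP_RETRY_MAYFAIL allocation that still
fails after heavy reclaim now returns NULL instead of invoking the OOM
killer, on the assumption that the caller can degrade gracefully. A
hedged kernel-side sketch of that calling pattern (the helper is
invented for illustration):

	/* Hypothetical caller: try hard for contiguous pages, but fall
	 * back to vmalloc ourselves rather than OOM-killing a task.
	 */
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void *alloc_big_table(size_t size)
	{
		struct page *page;

		page = alloc_pages(GFP_KERNEL | __GFP_ZERO |
				   __GFP_RETRY_MAYFAIL, get_order(size));
		if (page)
			return page_address(page);

		/* Reclaim already tried hard and failed. */
		return vzalloc(size);
	}

The matching free path would need is_vmalloc_addr() to decide between
free_pages() and vfree().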
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index a56c3989f773..c50b1a14d55e 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -56,11 +56,11 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
if (node_state(node, N_HIGH_MEMORY))
page = alloc_pages_node(
- node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
+ node, GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
get_order(size));
else
page = alloc_pages(
- GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
+ GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
get_order(size));
if (page)
return page_address(page);
diff --git a/mm/util.c b/mm/util.c
index 26be6407abd7..7b07ec852e01 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -83,6 +83,8 @@ EXPORT_SYMBOL(kstrdup_const);
* @s: the string to duplicate
* @max: read at most @max chars from @s
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Note: Use kmemdup_nul() instead if the size is known exactly.
*/
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
@@ -121,6 +123,28 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
EXPORT_SYMBOL(kmemdup);
/**
+ * kmemdup_nul - Create a NUL-terminated string from unterminated data
+ * @s: The data to stringify
+ * @len: The size of the data
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
+{
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ buf = kmalloc_track_caller(len + 1, gfp);
+ if (buf) {
+ memcpy(buf, s, len);
+ buf[len] = '\0';
+ }
+ return buf;
+}
+EXPORT_SYMBOL(kmemdup_nul);
+
+/**
* memdup_user - duplicate memory region from user space
*
* @src: source address in user space
@@ -339,9 +363,9 @@ EXPORT_SYMBOL(vm_mmap);
* Uses kmalloc to get the memory but if the allocation fails then falls back
* to the vmalloc allocator. Use kvfree for freeing the memory.
*
- * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_REPEAT
- * is supported only for large (>32kB) allocations, and it should be used only if
- * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks.
+ * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
+ * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
+ * preferable to the vmalloc fallback, due to visible performance drawbacks.
*
* Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
*/
@@ -366,13 +390,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if (size > PAGE_SIZE) {
kmalloc_flags |= __GFP_NOWARN;
- /*
- * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly
- * requests because there is no other way to tell the allocator
- * that we want to fail rather than retry endlessly.
- */
- if (!(kmalloc_flags & __GFP_REPEAT) ||
- (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+ if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
kmalloc_flags |= __GFP_NORETRY;
}
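After this simplification kvmalloc_node() adds __GFP_NORETRY only when
the caller did not pass __GFP_RETRY_MAYFAIL, so the flag now behaves
uniformly regardless of size. A hedged usage sketch (the struct and
helpers are illustrative, not from this patch):

	/* Prefer kmalloc, transparently fall back to vmalloc; the
	 * __GFP_RETRY_MAYFAIL makes the kmalloc attempt try hard first.
	 */
	#include <linux/mm.h>
	#include <linux/types.h>

	struct entry {
		u64 key;
		u64 val;
	};

	static struct entry *alloc_entries(unsigned int n)
	{
		return kvmalloc_array(n, sizeof(struct entry),
				      GFP_KERNEL | __GFP_RETRY_MAYFAIL |
				      __GFP_ZERO);
	}

	static void free_entries(struct entry *e)
	{
		kvfree(e);	/* handles both kmalloc and vmalloc memory */
	}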
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6016ab079e2b..8698c1c86c4d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1795,7 +1795,7 @@ fail:
* allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot.
*
- * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_REPEAT
+ * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
* and __GFP_NOFAIL are not supported
*
* Any use of gfp flags outside of GFP_KERNEL should be consulted
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e9210f825219..a1af041930a6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2506,18 +2506,18 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return false;
/* Consider stopping depending on scan and reclaim activity */
- if (sc->gfp_mask & __GFP_REPEAT) {
+ if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {
/*
- * For __GFP_REPEAT allocations, stop reclaiming if the
+ * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the
* full LRU list has been scanned and we are still failing
* to reclaim pages. This full LRU scan is potentially
- * expensive but a __GFP_REPEAT caller really wants to succeed
+ * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed
*/
if (!nr_reclaimed && !nr_scanned)
return false;
} else {
/*
- * For non-__GFP_REPEAT allocations which can presumably
+ * For non-__GFP_RETRY_MAYFAIL allocations which can presumably
* fail without consequence, stop if we failed to reclaim
* any pages from the last SWAP_CLUSTER_MAX number of
* pages that were scanned. This will return to the
diff --git a/net/9p/client.c b/net/9p/client.c
index 1218fb3b52da..4674235b0d9b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -37,6 +37,7 @@
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
+#include <linux/seq_file.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include "protocol.h"
@@ -77,6 +78,30 @@ inline int p9_is_proto_dotu(struct p9_client *clnt)
}
EXPORT_SYMBOL(p9_is_proto_dotu);
+int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
+{
+ if (clnt->msize != 8192)
+ seq_printf(m, ",msize=%u", clnt->msize);
+ seq_printf(m, "trans=%s", clnt->trans_mod->name);
+
+ switch (clnt->proto_version) {
+ case p9_proto_legacy:
+ seq_puts(m, ",noextend");
+ break;
+ case p9_proto_2000u:
+ seq_puts(m, ",version=9p2000.u");
+ break;
+ case p9_proto_2000L:
+ /* Default */
+ break;
+ }
+
+ if (clnt->trans_mod->show_options)
+ return clnt->trans_mod->show_options(m, clnt);
+ return 0;
+}
+EXPORT_SYMBOL(p9_show_client_options);
+
/*
* Some error codes are taken directly from the server replies,
* make sure they are valid.
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index dca3cdd1a014..ddfa86648f95 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -41,6 +41,7 @@
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
@@ -51,6 +52,9 @@
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR 2
+static struct p9_trans_module p9_tcp_trans;
+static struct p9_trans_module p9_fd_trans;
+
/**
* struct p9_fd_opts - per-transport options
* @rfd: file descriptor for reading (trans=fd)
@@ -63,7 +67,7 @@ struct p9_fd_opts {
int rfd;
int wfd;
u16 port;
- int privport;
+ bool privport;
};
/*
@@ -720,6 +724,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
return 0;
}
+static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
+{
+ if (clnt->trans_mod == &p9_tcp_trans) {
+ if (clnt->trans_opts.tcp.port != P9_PORT)
+ seq_printf(m, "port=%u", clnt->trans_opts.tcp.port);
+ } else if (clnt->trans_mod == &p9_fd_trans) {
+ if (clnt->trans_opts.fd.rfd != ~0)
+ seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd);
+ if (clnt->trans_opts.fd.wfd != ~0)
+ seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd);
+ }
+ return 0;
+}
+
/**
* parse_opts - parse mount options into p9_fd_opts structure
* @params: options string passed from mount
@@ -738,7 +756,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
opts->port = P9_PORT;
opts->rfd = ~0;
opts->wfd = ~0;
- opts->privport = 0;
+ opts->privport = false;
if (!params)
return 0;
@@ -776,7 +794,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
opts->wfd = option;
break;
case Opt_privport:
- opts->privport = 1;
+ opts->privport = true;
break;
default:
continue;
@@ -942,6 +960,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
csocket = NULL;
+ client->trans_opts.tcp.port = opts.port;
+ client->trans_opts.tcp.privport = opts.privport;
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
sin_server.sin_port = htons(opts.port);
@@ -1020,6 +1040,8 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
struct p9_fd_opts opts;
parse_opts(args, &opts);
+ client->trans_opts.fd.rfd = opts.rfd;
+ client->trans_opts.fd.wfd = opts.wfd;
if (opts.rfd == ~0 || opts.wfd == ~0) {
pr_err("Insufficient options for proto=fd\n");
@@ -1044,6 +1066,7 @@ static struct p9_trans_module p9_tcp_trans = {
.request = p9_fd_request,
.cancel = p9_fd_cancel,
.cancelled = p9_fd_cancelled,
+ .show_options = p9_fd_show_options,
.owner = THIS_MODULE,
};
@@ -1056,6 +1079,7 @@ static struct p9_trans_module p9_unix_trans = {
.request = p9_fd_request,
.cancel = p9_fd_cancel,
.cancelled = p9_fd_cancelled,
+ .show_options = p9_fd_show_options,
.owner = THIS_MODULE,
};
@@ -1068,6 +1092,7 @@ static struct p9_trans_module p9_fd_trans = {
.request = p9_fd_request,
.cancel = p9_fd_cancel,
.cancelled = p9_fd_cancelled,
+ .show_options = p9_fd_show_options,
.owner = THIS_MODULE,
};
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 553ed4ecb6a0..6d8e3031978f 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -43,6 +43,7 @@
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
@@ -70,6 +71,8 @@
* @dm_mr: DMA Memory Region pointer
* @lkey: The local access only memory region key
* @timeout: Number of uSecs to wait for connection management events
+ * @privport: Whether a privileged port may be used
+ * @port: The port to use
* @sq_depth: The depth of the Send Queue
* @sq_sem: Semaphore for the SQ
* @rq_depth: The depth of the Receive Queue.
@@ -95,6 +98,8 @@ struct p9_trans_rdma {
struct ib_qp *qp;
struct ib_cq *cq;
long timeout;
+ bool privport;
+ u16 port;
int sq_depth;
struct semaphore sq_sem;
int rq_depth;
@@ -133,10 +138,10 @@ struct p9_rdma_context {
*/
struct p9_rdma_opts {
short port;
+ bool privport;
int sq_depth;
int rq_depth;
long timeout;
- int privport;
};
/*
@@ -159,6 +164,23 @@ static match_table_t tokens = {
{Opt_err, NULL},
};
+static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
+{
+ struct p9_trans_rdma *rdma = clnt->trans;
+
+ if (rdma->port != P9_PORT)
+ seq_printf(m, ",port=%u", rdma->port);
+ if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
+ seq_printf(m, ",sq=%u", rdma->sq_depth);
+ if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
+ seq_printf(m, ",rq=%u", rdma->rq_depth);
+ if (rdma->timeout != P9_RDMA_TIMEOUT)
+ seq_printf(m, ",timeout=%lu", rdma->timeout);
+ if (rdma->privport)
+ seq_puts(m, ",privport");
+ return 0;
+}
+
/**
* parse_opts - parse mount options into rdma options structure
* @params: options string passed from mount
@@ -177,7 +199,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
opts->sq_depth = P9_RDMA_SQ_DEPTH;
opts->rq_depth = P9_RDMA_RQ_DEPTH;
opts->timeout = P9_RDMA_TIMEOUT;
- opts->privport = 0;
+ opts->privport = false;
if (!params)
return 0;
@@ -218,7 +240,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
opts->timeout = option;
break;
case Opt_privport:
- opts->privport = 1;
+ opts->privport = true;
break;
default:
continue;
@@ -560,6 +582,8 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
if (!rdma)
return NULL;
+ rdma->port = opts->port;
+ rdma->privport = opts->privport;
rdma->sq_depth = opts->sq_depth;
rdma->rq_depth = opts->rq_depth;
rdma->timeout = opts->timeout;
@@ -733,6 +757,7 @@ static struct p9_trans_module p9_rdma_trans = {
.request = rdma_request,
.cancel = rdma_cancel,
.cancelled = rdma_cancelled,
+ .show_options = p9_rdma_show_options,
};
/**
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index f0f3447e8aa4..861ae2a165f4 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -34,11 +34,11 @@ static struct lock_class_key bridge_netdev_addr_lock_key;
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
const struct nf_br_ops *nf_ops;
+ const unsigned char *dest;
u16 vid = 0;
rcu_read_lock();
@@ -61,6 +61,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
goto out;
+ dest = eth_hdr(skb)->h_dest;
if (is_broadcast_ether_addr(dest)) {
br_flood(br, skb, BR_PKT_BROADCAST, false, true);
} else if (is_multicast_ether_addr(dest)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 013f2290bfa5..7637f58c1226 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -131,11 +131,11 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
- const unsigned char *dest = eth_hdr(skb)->h_dest;
enum br_pkt_type pkt_type = BR_PKT_UNICAST;
struct net_bridge_fdb_entry *dst = NULL;
struct net_bridge_mdb_entry *mdst;
bool local_rcv, mcast_hit = false;
+ const unsigned char *dest;
struct net_bridge *br;
u16 vid = 0;
@@ -153,6 +153,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
local_rcv = !!(br->dev->flags & IFF_PROMISC);
+ dest = eth_hdr(skb)->h_dest;
if (is_multicast_ether_addr(dest)) {
/* by definition the broadcast is also a multicast address */
if (is_broadcast_ether_addr(dest)) {
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 3d265c5cb6d0..5c036d2f401e 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -599,7 +599,11 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
{
struct ceph_client *client;
struct ceph_entity_addr *myaddr = NULL;
- int err = -ENOMEM;
+ int err;
+
+ err = wait_for_random_bytes();
+ if (err < 0)
+ return ERR_PTR(err);
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (client == NULL)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0c31035bbfee..b7cc615d42ef 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3203,8 +3203,10 @@ static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
return NULL;
data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
- if (data)
- data->type = type;
+ if (!data)
+ return NULL;
+
+ data->type = type;
INIT_LIST_HEAD(&data->links);
return data;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 86a9737d8e3f..901bb8221366 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -5310,7 +5310,10 @@ static int invalidate_authorizer(struct ceph_connection *con)
static void osd_reencode_message(struct ceph_msg *msg)
{
- encode_request_finish(msg);
+ int type = le16_to_cpu(msg->hdr.type);
+
+ if (type == CEPH_MSG_OSD_OP)
+ encode_request_finish(msg);
}
static int osd_sign_message(struct ceph_msg *msg)
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 864789c5974e..64ae9f89773a 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -338,7 +338,7 @@ static void crush_finalize(struct crush_map *c)
static struct crush_map *crush_decode(void *pbyval, void *end)
{
struct crush_map *c;
- int err = -EINVAL;
+ int err;
int i, j;
void **p = &pbyval;
void *start = pbyval;
@@ -407,7 +407,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
size = sizeof(struct crush_bucket_straw2);
break;
default:
- err = -EINVAL;
goto bad;
}
BUG_ON(size == 0);
@@ -439,31 +438,31 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
err = crush_decode_uniform_bucket(p, end,
(struct crush_bucket_uniform *)b);
if (err < 0)
- goto bad;
+ goto fail;
break;
case CRUSH_BUCKET_LIST:
err = crush_decode_list_bucket(p, end,
(struct crush_bucket_list *)b);
if (err < 0)
- goto bad;
+ goto fail;
break;
case CRUSH_BUCKET_TREE:
err = crush_decode_tree_bucket(p, end,
(struct crush_bucket_tree *)b);
if (err < 0)
- goto bad;
+ goto fail;
break;
case CRUSH_BUCKET_STRAW:
err = crush_decode_straw_bucket(p, end,
(struct crush_bucket_straw *)b);
if (err < 0)
- goto bad;
+ goto fail;
break;
case CRUSH_BUCKET_STRAW2:
err = crush_decode_straw2_bucket(p, end,
(struct crush_bucket_straw2 *)b);
if (err < 0)
- goto bad;
+ goto fail;
break;
}
}
@@ -474,7 +473,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
u32 yes;
struct crush_rule *r;
- err = -EINVAL;
ceph_decode_32_safe(p, end, yes, bad);
if (!yes) {
dout("crush_decode NO rule %d off %x %p to %p\n",
@@ -489,7 +487,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
/* len */
ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
- err = -EINVAL;
if (yes > (ULONG_MAX - sizeof(*r))
/ sizeof(struct crush_rule_step))
goto bad;
@@ -557,7 +554,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
if (*p != end) {
err = decode_choose_args(p, end, c);
if (err)
- goto bad;
+ goto fail;
}
done:
@@ -567,10 +564,14 @@ done:
badmem:
err = -ENOMEM;
-bad:
+fail:
dout("crush_decode fail %d\n", err);
crush_destroy(c);
return ERR_PTR(err);
+
+bad:
+ err = -EINVAL;
+ goto fail;
}
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
@@ -1399,7 +1400,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
- pg = kzalloc(sizeof(*pg) + 2 * len * sizeof(u32), GFP_NOIO);
+ pg = alloc_pg_mapping(2 * len * sizeof(u32));
if (!pg)
return ERR_PTR(-ENOMEM);
@@ -1544,7 +1545,7 @@ static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
if (struct_v >= 3) {
/* erasure_code_profiles */
ceph_decode_skip_map_of_map(p, end, string, string, string,
- bad);
+ e_inval);
}
if (struct_v >= 4) {
@@ -1825,9 +1826,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
if (struct_v >= 3) {
/* new_erasure_code_profiles */
ceph_decode_skip_map_of_map(p, end, string, string, string,
- bad);
+ e_inval);
/* old_erasure_code_profiles */
- ceph_decode_skip_set(p, end, string, bad);
+ ceph_decode_skip_set(p, end, string, e_inval);
}
if (struct_v >= 4) {
diff --git a/net/compat.c b/net/compat.c
index aba929e5250f..6ded6c821d7a 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -37,21 +37,16 @@ int get_compat_msghdr(struct msghdr *kmsg,
struct sockaddr __user **save_addr,
struct iovec **iov)
{
- compat_uptr_t uaddr, uiov, tmp3;
- compat_size_t nr_segs;
+ struct compat_msghdr msg;
ssize_t err;
- if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) ||
- __get_user(uaddr, &umsg->msg_name) ||
- __get_user(kmsg->msg_namelen, &umsg->msg_namelen) ||
- __get_user(uiov, &umsg->msg_iov) ||
- __get_user(nr_segs, &umsg->msg_iovlen) ||
- __get_user(tmp3, &umsg->msg_control) ||
- __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
- __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ if (copy_from_user(&msg, umsg, sizeof(*umsg)))
return -EFAULT;
- if (!uaddr)
+ kmsg->msg_flags = msg.msg_flags;
+ kmsg->msg_namelen = msg.msg_namelen;
+
+ if (!msg.msg_name)
kmsg->msg_namelen = 0;
if (kmsg->msg_namelen < 0)
@@ -59,14 +54,16 @@ int get_compat_msghdr(struct msghdr *kmsg,
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
- kmsg->msg_control = compat_ptr(tmp3);
+
+ kmsg->msg_control = compat_ptr(msg.msg_control);
+ kmsg->msg_controllen = msg.msg_controllen;
if (save_addr)
- *save_addr = compat_ptr(uaddr);
+ *save_addr = compat_ptr(msg.msg_name);
- if (uaddr && kmsg->msg_namelen) {
+ if (msg.msg_name && kmsg->msg_namelen) {
if (!save_addr) {
- err = move_addr_to_kernel(compat_ptr(uaddr),
+ err = move_addr_to_kernel(compat_ptr(msg.msg_name),
kmsg->msg_namelen,
kmsg->msg_name);
if (err < 0)
@@ -77,13 +74,13 @@ int get_compat_msghdr(struct msghdr *kmsg,
kmsg->msg_namelen = 0;
}
- if (nr_segs > UIO_MAXIOV)
+ if (msg.msg_iovlen > UIO_MAXIOV)
return -EMSGSIZE;
kmsg->msg_iocb = NULL;
return compat_import_iovec(save_addr ? READ : WRITE,
- compat_ptr(uiov), nr_segs,
+ compat_ptr(msg.msg_iov), msg.msg_iovlen,
UIO_FASTIOV, iov, &kmsg->msg_iter);
}
@@ -316,15 +313,15 @@ struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
{
struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
- compat_uptr_t ptr;
- u16 len;
-
- if (!access_ok(VERIFY_READ, fprog32, sizeof(*fprog32)) ||
- !access_ok(VERIFY_WRITE, kfprog, sizeof(struct sock_fprog)) ||
- __get_user(len, &fprog32->len) ||
- __get_user(ptr, &fprog32->filter) ||
- __put_user(len, &kfprog->len) ||
- __put_user(compat_ptr(ptr), &kfprog->filter))
+ struct compat_sock_fprog f32;
+ struct sock_fprog f;
+
+ if (copy_from_user(&f32, fprog32, sizeof(*fprog32)))
+ return NULL;
+ memset(&f, 0, sizeof(f));
+ f.len = f32.len;
+ f.filter = compat_ptr(f32.filter);
+ if (copy_to_user(kfprog, &f, sizeof(struct sock_fprog)))
return NULL;
return kfprog;
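The get_compat_msghdr() rewrite above trades a chain of __get_user()
calls for one copy_from_user() of the whole compat struct, keeping the
fault check in a single place. A hedged sketch of the same pattern on
an invented struct (names and the length limit are illustrative):

	#include <linux/compat.h>
	#include <linux/uaccess.h>

	#define FOO_MAX_LEN 4096	/* assumed limit for illustration */

	struct foo {
		void __user *buf;
		size_t len;
		u32 flags;
	};

	struct compat_foo {
		compat_uptr_t buf;
		compat_size_t len;
		u32 flags;
	};

	static int get_compat_foo(struct foo *kfoo,
				  struct compat_foo __user *ufoo)
	{
		struct compat_foo f;

		/* Snapshot once, then validate fields from the copy. */
		if (copy_from_user(&f, ufoo, sizeof(f)))
			return -EFAULT;

		if (f.len > FOO_MAX_LEN)
			return -EINVAL;

		kfoo->buf = compat_ptr(f.buf);
		kfoo->len = f.len;
		kfoo->flags = f.flags;
		return 0;
	}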
diff --git a/net/core/dev.c b/net/core/dev.c
index d1b9c9b6c970..509af6ce8831 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7398,7 +7398,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
BUG_ON(count < 1);
- rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
+ rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!rx)
return -ENOMEM;
@@ -7438,7 +7438,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
if (count < 1 || count > 0xffff)
return -EINVAL;
- tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
+ tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!tx)
return -ENOMEM;
@@ -7979,7 +7979,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
/* ensure 32-byte alignment of whole construct */
alloc_size += NETDEV_ALIGN - 1;
- p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
+ p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!p)
return NULL;
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 82fd4c9c4a1b..06b147d7d9e2 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
+ ifr.ifr_name[IFNAMSIZ-1] = 0;
error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
if (error)
@@ -424,6 +425,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (copy_from_user(&iwr, arg, sizeof(iwr)))
return -EFAULT;
+ iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+
return wext_handle_ioctl(net, &iwr, cmd, arg);
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index a0093e1b0235..fdcb1bcd2afa 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -400,6 +400,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
err = -ENOMEM;
goto errout;
}
+ refcount_set(&rule->refcnt, 1);
rule->fr_net = net;
rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
@@ -517,8 +518,6 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
last = r;
}
- refcount_set(&rule->refcnt, 1);
-
if (last)
list_add_rcu(&rule->list, &last->list);
else
diff --git a/net/core/filter.c b/net/core/filter.c
index 29e690cbe820..7e9708653c6f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2275,7 +2275,7 @@ static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
bpf_skb_net_grow(skb, len_diff_abs);
bpf_compute_data_end(skb);
- return 0;
+ return ret;
}
BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e31fc11a8000..d0713627deb6 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -347,8 +347,7 @@ out_entries:
static void neigh_get_hash_rnd(u32 *x)
{
- get_random_bytes(x, sizeof(*x));
- *x |= 1;
+ *x = get_random_u32() | 1;
}
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d3408a693166..8357f164c660 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
struct sk_buff *skb = clist;
clist = clist->next;
if (!skb_irq_freeable(skb)) {
- refcount_inc(&skb->users);
+ refcount_set(&skb->users, 1);
dev_kfree_skb_any(skb); /* put this one back */
} else {
__kfree_skb(skb);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d1ba90980be1..9201e3621351 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2031,7 +2031,8 @@ static int do_setlink(const struct sk_buff *skb,
struct sockaddr *sa;
int len;
- len = sizeof(sa_family_t) + dev->addr_len;
+ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
+ sizeof(*sa));
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
err = -ENOMEM;
@@ -4241,6 +4242,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
switch (event) {
case NETDEV_REBOOT:
+ case NETDEV_CHANGEADDR:
case NETDEV_CHANGENAME:
case NETDEV_FEAT_CHANGE:
case NETDEV_BONDING_FAILOVER:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6bc19c80c210..84bdfa229b0d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4723,7 +4723,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
gfp_head = gfp_mask;
if (gfp_head & __GFP_DIRECT_RECLAIM)
- gfp_head |= __GFP_REPEAT;
+ gfp_head |= __GFP_RETRY_MAYFAIL;
*errcode = -ENOBUFS;
skb = alloc_skb(header_len, gfp_head);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4a05d7876850..fa6be9750bb4 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -126,7 +126,7 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
static u16 dccp_reset_code_convert(const u8 code)
{
- const u16 error_code[] = {
+ static const u16 error_code[] = {
[DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */
[DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */
[DCCP_RESET_CODE_ABORTED] = ECONNRESET,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4e678fa892dd..044d2a159a3c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1334,13 +1334,14 @@ static struct pernet_operations fib_net_ops = {
void __init ip_fib_init(void)
{
- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ fib_trie_init();
register_pernet_subsys(&fib_net_ops);
+
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
- fib_trie_init();
+ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d338f865951a..b631ec685d77 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -599,6 +599,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
hlen = iph->ihl * 4;
mtu = mtu - hlen; /* Size of data space */
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
+ ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
/* When frag_list is given, use it. First, check its validity:
* some transformers could create wrong frag_list or break existing
@@ -614,14 +615,15 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
ip_is_fragment(iph) ||
- skb_cloned(skb))
+ skb_cloned(skb) ||
+ skb_headroom(skb) < ll_rs)
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
- skb_headroom(frag) < hlen)
+ skb_headroom(frag) < hlen + ll_rs)
goto slow_path_clean;
/* Partially cloned skb? */
@@ -711,8 +713,6 @@ slow_path:
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
- ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
-
/*
* Fragment the datagram.
*/
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 805c8ddfe860..4bbc273b45e8 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -72,8 +72,7 @@ static const struct nf_chain_type filter_arp = {
.family = NFPROTO_ARP,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
- (1 << NF_ARP_OUT) |
- (1 << NF_ARP_FORWARD),
+ (1 << NF_ARP_OUT),
};
static int __init nf_tables_arp_init(void)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c816cd53f7fc..0383e66f59bc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2979,8 +2979,7 @@ static __net_init int rt_genid_init(struct net *net)
{
atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
- get_random_bytes(&net->ipv4.dev_addr_genid,
- sizeof(net->ipv4.dev_addr_genid));
+ atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
return 0;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0905cf04c2a4..03ad8778c395 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -335,6 +335,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->ts_off = 0;
+ treq->txhash = net_tx_rndhash();
req->mss = mss;
ireq->ir_num = ntohs(th->dest);
ireq->ir_rmt_port = th->source;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index dbcc9352a48f..69ee877574d0 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -112,7 +112,8 @@ struct bbr {
cwnd_gain:10, /* current gain for setting cwnd */
full_bw_cnt:3, /* number of rounds without large bw gains */
cycle_idx:3, /* current index in pacing_gain cycle array */
- unused_b:6;
+ has_seen_rtt:1, /* have we seen an RTT sample yet? */
+ unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
};
@@ -211,6 +212,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
return rate >> BW_SCALE;
}
+/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
+static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+ u64 rate = bw;
+
+ rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+ return rate;
+}
+
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw;
+ u32 rtt_us;
+
+ if (tp->srtt_us) { /* any RTT sample yet? */
+ rtt_us = max(tp->srtt_us >> 3, 1U);
+ bbr->has_seen_rtt = 1;
+ } else { /* no RTT sample yet */
+ rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
+ }
+ bw = (u64)tp->snd_cwnd * BW_UNIT;
+ do_div(bw, rtt_us);
+ sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
/* Pace using current bw estimate and a gain factor. In order to help drive the
* network toward lower queues while maintaining high utilization and low
* latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -220,12 +250,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
*/
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u64 rate = bw;
+ u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
- rate = bbr_rate_bytes_per_sec(sk, rate, gain);
- rate = min_t(u64, rate, sk->sk_max_pacing_rate);
- if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+ if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
+ bbr_init_pacing_rate_from_rtt(sk);
+ if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
sk->sk_pacing_rate = rate;
}
@@ -798,7 +829,6 @@ static void bbr_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u64 bw;
bbr->prior_cwnd = 0;
bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
@@ -814,11 +844,8 @@ static void bbr_init(struct sock *sk)
minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
- /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
- bw = (u64)tp->snd_cwnd * BW_UNIT;
- do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
- sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
- bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+ bbr->has_seen_rtt = 0;
+ bbr_init_pacing_rate_from_rtt(sk);
bbr->restore_cwnd = 0;
bbr->round_start = 0;
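bbr_init_pacing_rate_from_rtt() above closes the gap where no RTT
sample exists at init time by assuming a nominal 1 ms. The arithmetic
it performs is pacing_rate = high_gain * cwnd * mss / srtt. A hedged
plain-integer rendering (the kernel version uses BW_UNIT fixed point
and bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1; this illustration
approximates that as 2885/1000):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t init_pacing_rate(uint32_t cwnd_pkts, uint32_t mss,
					 uint32_t srtt_us)
	{
		uint64_t bw;	/* bytes per second */

		if (!srtt_us)
			srtt_us = 1000;	/* no RTT sample yet: assume 1 ms */

		bw = (uint64_t)cwnd_pkts * mss * 1000000 / srtt_us;
		return bw * 2885 / 1000;	/* apply startup high gain */
	}

	int main(void)
	{
		/* 10-packet initial cwnd, 1448-byte MSS, 20 ms SRTT */
		printf("%llu bytes/sec\n", (unsigned long long)
		       init_pacing_rate(10, 1448, 20000));
		return 0;
	}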
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 25294d43e147..b057653ceca9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1388,6 +1388,11 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
unlock_sock_fast(sk, slow);
}
+ /* we cleared the head states previously only if the skb lacks any IP
+ * options, see __udp_queue_rcv_skb().
+ */
+ if (unlikely(IPCB(skb)->opt.optlen > 0))
+ skb_release_head_state(skb);
consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
@@ -1779,8 +1784,12 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk_mark_napi_id_once(sk, skb);
}
- /* clear all pending head states while they are hot in the cache */
- skb_release_head_state(skb);
+	/* At recvmsg() time we need skb->dst to process IP options-related
+	 * cmsg; otherwise we can clear all pending head states while they
+	 * are hot in the cache.
+	 */
+ if (likely(IPCB(skb)->opt.optlen == 0))
+ skb_release_head_state(skb);
rc = __udp_enqueue_schedule_skb(sk, skb);
if (rc < 0) {
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index e9065b8d3af8..abb2c307fbe8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
- u16 offset = sizeof(struct ipv6hdr);
+ unsigned int offset = sizeof(struct ipv6hdr);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
+ unsigned int len;
switch (**nexthdr) {
@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
- offset += ipv6_optlen(exthdr);
+ len = ipv6_optlen(exthdr);
+ if (len + offset >= IPV6_MAXPLEN)
+ return -EINVAL;
+ offset += len;
*nexthdr = &exthdr->nexthdr;
}
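The widening of offset above matters because ipv6_optlen() can return
up to 2048 bytes per extension header, so a crafted chain of maximal
headers wraps a u16 accumulator back below packet_len and the loop can
spin on stale offsets. A hedged standalone demonstration of the
wraparound (the 40 mirrors sizeof(struct ipv6hdr)):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t off16 = 40;
		unsigned int off = 40;
		int i;

		for (i = 0; i < 32; i++) {	/* 32 maximal ext headers */
			off16 += 2048;
			off += 2048;
		}
		/* u16 wraps back to 40; unsigned int reaches 65576,
		 * which the new IPV6_MAXPLEN check rejects early.
		 */
		printf("u16: %u, unsigned int: %u\n", off16, off);
		return 0;
	}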
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7b75b0620730..4e7817abc0b9 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -216,6 +216,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->ts_off = 0;
+ treq->txhash = net_tx_rndhash();
/*
* We need to lookup the dst_entry to get the correct window size.
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 552d606e57ca..974cf2a3795a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -227,114 +227,6 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
}
EXPORT_SYMBOL(nf_unregister_net_hooks);
-static LIST_HEAD(nf_hook_list);
-
-static int _nf_register_hook(struct nf_hook_ops *reg)
-{
- struct net *net, *last;
- int ret;
-
- for_each_net(net) {
- ret = nf_register_net_hook(net, reg);
- if (ret && ret != -ENOENT)
- goto rollback;
- }
- list_add_tail(&reg->list, &nf_hook_list);
-
- return 0;
-rollback:
- last = net;
- for_each_net(net) {
- if (net == last)
- break;
- nf_unregister_net_hook(net, reg);
- }
- return ret;
-}
-
-int nf_register_hook(struct nf_hook_ops *reg)
-{
- int ret;
-
- rtnl_lock();
- ret = _nf_register_hook(reg);
- rtnl_unlock();
-
- return ret;
-}
-EXPORT_SYMBOL(nf_register_hook);
-
-static void _nf_unregister_hook(struct nf_hook_ops *reg)
-{
- struct net *net;
-
- list_del(&reg->list);
- for_each_net(net)
- nf_unregister_net_hook(net, reg);
-}
-
-void nf_unregister_hook(struct nf_hook_ops *reg)
-{
- rtnl_lock();
- _nf_unregister_hook(reg);
- rtnl_unlock();
-}
-EXPORT_SYMBOL(nf_unregister_hook);
-
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
- unsigned int i;
- int err = 0;
-
- for (i = 0; i < n; i++) {
- err = nf_register_hook(&reg[i]);
- if (err)
- goto err;
- }
- return err;
-
-err:
- if (i > 0)
- nf_unregister_hooks(reg, i);
- return err;
-}
-EXPORT_SYMBOL(nf_register_hooks);
-
-/* Caller MUST take rtnl_lock() */
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
- unsigned int i;
- int err = 0;
-
- for (i = 0; i < n; i++) {
- err = _nf_register_hook(&reg[i]);
- if (err)
- goto err;
- }
- return err;
-
-err:
- if (i > 0)
- _nf_unregister_hooks(reg, i);
- return err;
-}
-EXPORT_SYMBOL(_nf_register_hooks);
-
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
- while (n-- > 0)
- nf_unregister_hook(&reg[n]);
-}
-EXPORT_SYMBOL(nf_unregister_hooks);
-
-/* Caller MUST take rtnl_lock */
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
- while (n-- > 0)
- _nf_unregister_hook(&reg[n]);
-}
-EXPORT_SYMBOL(_nf_unregister_hooks);
-
/* Returns 1 if okfn() needs to be executed by the caller,
* -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
@@ -450,40 +342,9 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
-static int nf_register_hook_list(struct net *net)
-{
- struct nf_hook_ops *elem;
- int ret;
-
- rtnl_lock();
- list_for_each_entry(elem, &nf_hook_list, list) {
- ret = nf_register_net_hook(net, elem);
- if (ret && ret != -ENOENT)
- goto out_undo;
- }
- rtnl_unlock();
- return 0;
-
-out_undo:
- list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
- nf_unregister_net_hook(net, elem);
- rtnl_unlock();
- return ret;
-}
-
-static void nf_unregister_hook_list(struct net *net)
-{
- struct nf_hook_ops *elem;
-
- rtnl_lock();
- list_for_each_entry(elem, &nf_hook_list, list)
- nf_unregister_net_hook(net, elem);
- rtnl_unlock();
-}
-
static int __net_init netfilter_net_init(struct net *net)
{
- int i, h, ret;
+ int i, h;
for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
for (h = 0; h < NF_MAX_HOOKS; h++)
@@ -500,16 +361,12 @@ static int __net_init netfilter_net_init(struct net *net)
return -ENOMEM;
}
#endif
- ret = nf_register_hook_list(net);
- if (ret)
- remove_proc_entry("netfilter", net->proc_net);
- return ret;
+ return 0;
}
static void __net_exit netfilter_net_exit(struct net *net)
{
- nf_unregister_hook_list(net);
remove_proc_entry("netfilter", net->proc_net);
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index e03d16ed550d..899c2c36da13 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
h = nf_ct_expect_dst_hash(net, &expect->tuple);
hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
if (expect_matches(i, expect)) {
- if (nf_ct_remove_expect(expect))
+ if (nf_ct_remove_expect(i))
break;
} else if (expect_clash(i, expect)) {
ret = -EBUSY;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 832c5a08d9a5..eb541786ccb7 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
.tuple = tuple,
.zone = zone
};
- struct rhlist_head *hl;
+ struct rhlist_head *hl, *h;
hl = rhltable_lookup(&nf_nat_bysource_table, &key,
nf_nat_bysource_params);
- if (!hl)
- return 0;
- ct = container_of(hl, typeof(*ct), nat_bysource);
+ rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
+ nf_ct_invert_tuplepr(result,
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ result->dst = tuple->dst;
- nf_ct_invert_tuplepr(result,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
- result->dst = tuple->dst;
+ if (in_range(l3proto, l4proto, result, range))
+ return 1;
+ }
- return in_range(l3proto, l4proto, result, range);
+ return 0;
}
/* For [FUTURE] fragmentation handling, we want the least-used
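The find_appropriate_src() change above matters because an rhltable, unlike a plain rhashtable, chains every entry that shares a key; inspecting only the first entry can miss a usable source mapping. The general lookup pattern, sketched with hypothetical entry and table names:

    struct rhlist_head *list, *pos;
    struct my_entry *e;                 /* hypothetical entry type */

    rcu_read_lock();
    list = rhltable_lookup(&my_table, &key, my_params);
    rhl_for_each_entry_rcu(e, pos, list, node) {
    	if (entry_usable(e))        /* hypothetical predicate */
    		break;              /* first acceptable entry wins */
    }
    rcu_read_unlock();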
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 92b05e188fd1..733d3e4a30d8 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -472,8 +472,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
if (msglen > skb->len)
msglen = skb->len;
- if (nlh->nlmsg_len < NLMSG_HDRLEN ||
- skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+ if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
return;
err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
@@ -500,7 +499,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+ if (skb->len < NLMSG_HDRLEN ||
+ nlh->nlmsg_len < NLMSG_HDRLEN ||
skb->len < nlh->nlmsg_len)
return;
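Both nfnetlink hunks above reorder the length checks so that nothing dereferences the netlink header before the skb is known to hold a full one; nlh->nlmsg_len lives inside the skb data. A sketch of the safe ordering as a standalone helper (illustrative, not a kernel API):

    static bool nlmsg_len_ok(const struct sk_buff *skb)
    {
    	const struct nlmsghdr *nlh = nlmsg_hdr(skb);

    	if (skb->len < NLMSG_HDRLEN)        /* header fully present? */
    		return false;
    	if (nlh->nlmsg_len < NLMSG_HDRLEN)  /* claimed length sane? */
    		return false;
    	return skb->len >= nlh->nlmsg_len;  /* whole message present? */
    }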
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 1770c1d9b37f..e1648238a9c9 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1003,14 +1003,10 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
- if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
- info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!info) {
- info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
- PAGE_KERNEL);
- if (!info)
- return NULL;
- }
+ info = kvmalloc(sz, GFP_KERNEL);
+ if (!info)
+ return NULL;
+
memset(info, 0, sizeof(*info));
info->size = size;
return info;
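The xt_alloc_table_info() hunk replaces the open-coded "kmalloc first, vmalloc fallback" dance with kvmalloc(), which implements exactly that policy. Usage sketch; the matching free is kvfree(), which handles either backing store:

    void *buf = kvmalloc(sz, GFP_KERNEL);   /* slab first, vmalloc fallback */
    if (!buf)
    	return NULL;
    /* ... use buf ... */
    kvfree(buf);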
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 08679ebb3068..e3c4c6c3fef7 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -629,6 +629,34 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
return ct;
}
+static
+struct nf_conn *ovs_ct_executed(struct net *net,
+ const struct sw_flow_key *key,
+ const struct ovs_conntrack_info *info,
+ struct sk_buff *skb,
+ bool *ct_executed)
+{
+ struct nf_conn *ct = NULL;
+
+ /* If no ct, check if we have evidence that an existing conntrack entry
+ * might be found for this skb. This happens when we lose a skb->_nfct
+ * due to an upcall, or if the direction is being forced. If the
+ * connection was not confirmed, it is not cached and needs to be run
+ * through conntrack again.
+ */
+ *ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
+ !(key->ct_state & OVS_CS_F_INVALID) &&
+ (key->ct_zone == info->zone.id);
+
+ if (*ct_executed || (!key->ct_state && info->force)) {
+ ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
+ !!(key->ct_state &
+ OVS_CS_F_NAT_MASK));
+ }
+
+ return ct;
+}
+
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
const struct sw_flow_key *key,
@@ -637,24 +665,17 @@ static bool skb_nfct_cached(struct net *net,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
+ bool ct_executed = true;
ct = nf_ct_get(skb, &ctinfo);
- /* If no ct, check if we have evidence that an existing conntrack entry
- * might be found for this skb. This happens when we lose a skb->_nfct
- * due to an upcall. If the connection was not confirmed, it is not
- * cached and needs to be run through conntrack again.
- */
- if (!ct && key->ct_state & OVS_CS_F_TRACKED &&
- !(key->ct_state & OVS_CS_F_INVALID) &&
- key->ct_zone == info->zone.id) {
- ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
- !!(key->ct_state
- & OVS_CS_F_NAT_MASK));
- if (ct)
- nf_ct_get(skb, &ctinfo);
- }
if (!ct)
+ ct = ovs_ct_executed(net, key, info, skb, &ct_executed);
+
+ if (ct)
+ nf_ct_get(skb, &ctinfo);
+ else
return false;
+
if (!net_eq(net, read_pnet(&ct->ct_net)))
return false;
if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
@@ -679,7 +700,7 @@ static bool skb_nfct_cached(struct net *net,
return false;
}
- return true;
+ return ct_executed;
}
#ifdef CONFIG_NF_NAT_NEEDED
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee035cbe5621..e7303f68972d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -212,6 +212,7 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *,
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);
struct packet_skb_cb {
union {
@@ -258,6 +259,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
if (skb != orig_skb)
goto drop;
+ packet_pick_tx_queue(dev, skb);
txq = skb_get_tx_queue(dev, skb);
local_bh_disable();
@@ -2745,8 +2747,6 @@ tpacket_error:
goto tpacket_error;
}
- packet_pick_tx_queue(dev, skb);
-
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
packet_inc_pending(&po->tx_ring);
@@ -2929,8 +2929,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->priority = sk->sk_priority;
skb->mark = sockc.mark;
- packet_pick_tx_queue(dev, skb);
-
if (po->has_vnet_hdr) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
if (err)
diff --git a/net/rds/send.c b/net/rds/send.c
index e81aa176f4e2..41b9f0f5bb9c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -170,8 +170,8 @@ restart:
* The acquire_in_xmit() check above ensures that only one
* caller can increment c_send_gen at any time.
*/
- cp->cp_send_gen++;
- send_gen = cp->cp_send_gen;
+ send_gen = READ_ONCE(cp->cp_send_gen) + 1;
+ WRITE_ONCE(cp->cp_send_gen, send_gen);
/*
* rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
@@ -431,7 +431,7 @@ over_batch:
smp_mb();
if ((test_bit(0, &conn->c_map_queued) ||
!list_empty(&cp->cp_send_queue)) &&
- send_gen == cp->cp_send_gen) {
+ send_gen == READ_ONCE(cp->cp_send_gen)) {
rds_stats_inc(s_send_lock_queue_raced);
if (batch_count < send_batch_count)
goto restart;
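In the rds/send.c hunk, cp_send_gen is compared locklessly at the bottom of rds_send_xmit(), so the plain increment becomes a marked load/store pair: READ_ONCE()/WRITE_ONCE() keep the compiler from tearing, refetching, or caching the value across the race window. The general pattern, sketched (requeue_work() is hypothetical):

    unsigned int gen;

    gen = READ_ONCE(cp->cp_send_gen) + 1;    /* one explicit load */
    WRITE_ONCE(cp->cp_send_gen, gen);        /* one explicit store */

    /* ... later, without the lock: */
    if (gen == READ_ONCE(cp->cp_send_gen))
    	requeue_work();                  /* hypothetical: nobody raced us */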
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index aed6cf2e9fd8..f2e9ed34a963 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -835,7 +835,7 @@ out_nlmsg_trim:
}
static int
-act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
+tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
struct list_head *actions, int event)
{
struct sk_buff *skb;
@@ -1018,7 +1018,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
}
if (event == RTM_GETACTION)
- ret = act_get_notify(net, portid, n, &actions, event);
+ ret = tcf_get_notify(net, portid, n, &actions, event);
else { /* delete */
ret = tcf_del_notify(net, n, &actions, portid);
if (ret)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 147fde73a0f5..263d16e3219e 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -648,7 +648,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
return 0;
/* If XPS was setup, we can allocate memory on right NUMA node */
- array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_REPEAT,
+ array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
netdev_queue_numa_node_read(sch->dev_queue));
if (!array)
return -ENOMEM;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 06dc351b6ba1..0b36e96cb0df 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
struct sctp_adaptation_ind_param aiparam;
struct sctp_supported_ext_param ext_param;
int num_ext = 0;
- __u8 extensions[3];
+ __u8 extensions[4];
struct sctp_paramhdr *auth_chunks = NULL,
*auth_hmacs = NULL;
@@ -393,7 +393,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
struct sctp_adaptation_ind_param aiparam;
struct sctp_supported_ext_param ext_param;
int num_ext = 0;
- __u8 extensions[3];
+ __u8 extensions[4];
struct sctp_paramhdr *auth_chunks = NULL,
*auth_hmacs = NULL,
*auth_random = NULL;
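The extensions[] bump above reflects that an INIT/INIT-ACK can now advertise up to four extension chunk types at once; each enabled feature appends one chunk code to the on-stack array, so a three-slot array overflows. A sketch of the fill pattern (the flags and the exact set of codes shown are illustrative):

    __u8 extensions[4];
    int num_ext = 0;

    if (fwd_tsn_enabled)                         /* illustrative flags */
    	extensions[num_ext++] = SCTP_CID_FWD_TSN;
    if (auth_enabled)
    	extensions[num_ext++] = SCTP_CID_AUTH;
    if (asconf_enabled)
    	extensions[num_ext++] = SCTP_CID_ASCONF;
    if (reconf_enabled)
    	extensions[num_ext++] = SCTP_CID_RECONF;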
diff --git a/net/socket.c b/net/socket.c
index 59e902b9df09..bf2122691fba 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1910,22 +1910,18 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
struct sockaddr __user **save_addr,
struct iovec **iov)
{
- struct sockaddr __user *uaddr;
- struct iovec __user *uiov;
- size_t nr_segs;
+ struct user_msghdr msg;
ssize_t err;
- if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) ||
- __get_user(uaddr, &umsg->msg_name) ||
- __get_user(kmsg->msg_namelen, &umsg->msg_namelen) ||
- __get_user(uiov, &umsg->msg_iov) ||
- __get_user(nr_segs, &umsg->msg_iovlen) ||
- __get_user(kmsg->msg_control, &umsg->msg_control) ||
- __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
- __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ if (copy_from_user(&msg, umsg, sizeof(*umsg)))
return -EFAULT;
- if (!uaddr)
+ kmsg->msg_control = msg.msg_control;
+ kmsg->msg_controllen = msg.msg_controllen;
+ kmsg->msg_flags = msg.msg_flags;
+
+ kmsg->msg_namelen = msg.msg_namelen;
+ if (!msg.msg_name)
kmsg->msg_namelen = 0;
if (kmsg->msg_namelen < 0)
@@ -1935,11 +1931,11 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
if (save_addr)
- *save_addr = uaddr;
+ *save_addr = msg.msg_name;
- if (uaddr && kmsg->msg_namelen) {
+ if (msg.msg_name && kmsg->msg_namelen) {
if (!save_addr) {
- err = move_addr_to_kernel(uaddr, kmsg->msg_namelen,
+ err = move_addr_to_kernel(msg.msg_name, kmsg->msg_namelen,
kmsg->msg_name);
if (err < 0)
return err;
@@ -1949,12 +1945,13 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
kmsg->msg_namelen = 0;
}
- if (nr_segs > UIO_MAXIOV)
+ if (msg.msg_iovlen > UIO_MAXIOV)
return -EMSGSIZE;
kmsg->msg_iocb = NULL;
- return import_iovec(save_addr ? READ : WRITE, uiov, nr_segs,
+ return import_iovec(save_addr ? READ : WRITE,
+ msg.msg_iov, msg.msg_iovlen,
UIO_FASTIOV, iov, &kmsg->msg_iter);
}
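The copy_msghdr_from_user() rewrite collapses seven unchecked __get_user() calls (guarded by one access_ok()) into a single copy_from_user() of the whole struct user_msghdr, which is both simpler and harder to get wrong. The general pattern, sketched:

    struct user_msghdr msg;

    if (copy_from_user(&msg, umsg, sizeof(msg)))
    	return -EFAULT;
    /* all fields of 'msg' are now a stable kernel-side snapshot */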
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index fb39284ec174..12649c9fedab 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
+#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
@@ -927,7 +928,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
if (ret)
goto out_err;
- if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+ if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
ret = GSS_S_BAD_SIG;
goto out_err;
}
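The gss_krb5 hunk swaps memcmp() for crypto_memneq() when verifying the packet HMAC: memcmp() returns as soon as bytes differ, so its run time leaks how long a forged checksum's matching prefix is, while crypto_memneq() always touches every byte. Verification pattern, sketched:

    #include <crypto/algapi.h>

    /* constant-time MAC check: never memcmp() authenticator data */
    if (crypto_memneq(pkt_hmac, our_hmac, cksumlen))
    	return GSS_S_BAD_SIG;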
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index f0c6a8c78a56..46b295e4f2b8 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -55,15 +55,15 @@ enum {
#define PROC(proc, name) \
[GSSX_##proc] = { \
.p_proc = GSSX_##proc, \
- .p_encode = (kxdreproc_t)gssx_enc_##name, \
- .p_decode = (kxdrdproc_t)gssx_dec_##name, \
+ .p_encode = gssx_enc_##name, \
+ .p_decode = gssx_dec_##name, \
.p_arglen = GSSX_ARG_##name##_sz, \
.p_replen = GSSX_RES_##name##_sz, \
.p_statidx = GSSX_##proc, \
.p_name = #proc, \
}
-static struct rpc_procinfo gssp_procedures[] = {
+static const struct rpc_procinfo gssp_procedures[] = {
PROC(INDICATE_MECHS, indicate_mechs),
PROC(GET_CALL_CONTEXT, get_call_context),
PROC(IMPORT_AND_CANON_NAME, import_and_canon_name),
@@ -364,11 +364,12 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data)
/*
* Initialization stuff
*/
-
+static unsigned int gssp_version1_counts[ARRAY_SIZE(gssp_procedures)];
static const struct rpc_version gssp_version1 = {
.number = GSSPROXY_VERS_1,
.nrprocs = ARRAY_SIZE(gssp_procedures),
.procs = gssp_procedures,
+ .counts = gssp_version1_counts,
};
static const struct rpc_version *gssp_version[] = {
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 25d9a9cf7b66..c4778cae58ef 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -44,7 +44,7 @@ static int gssx_dec_bool(struct xdr_stream *xdr, u32 *v)
}
static int gssx_enc_buffer(struct xdr_stream *xdr,
- gssx_buffer *buf)
+ const gssx_buffer *buf)
{
__be32 *p;
@@ -56,7 +56,7 @@ static int gssx_enc_buffer(struct xdr_stream *xdr,
}
static int gssx_enc_in_token(struct xdr_stream *xdr,
- struct gssp_in_token *in)
+ const struct gssp_in_token *in)
{
__be32 *p;
@@ -130,7 +130,7 @@ static int gssx_dec_option(struct xdr_stream *xdr,
}
static int dummy_enc_opt_array(struct xdr_stream *xdr,
- struct gssx_option_array *oa)
+ const struct gssx_option_array *oa)
{
__be32 *p;
@@ -348,7 +348,7 @@ static int gssx_dec_status(struct xdr_stream *xdr,
}
static int gssx_enc_call_ctx(struct xdr_stream *xdr,
- struct gssx_call_ctx *ctx)
+ const struct gssx_call_ctx *ctx)
{
struct gssx_option opt;
__be32 *p;
@@ -733,8 +733,9 @@ static int gssx_enc_cb(struct xdr_stream *xdr, struct gssx_cb *cb)
void gssx_enc_accept_sec_context(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct gssx_arg_accept_sec_context *arg)
+ const void *data)
{
+ const struct gssx_arg_accept_sec_context *arg = data;
int err;
err = gssx_enc_call_ctx(xdr, &arg->call_ctx);
@@ -789,8 +790,9 @@ done:
int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct gssx_res_accept_sec_context *res)
+ void *data)
{
+ struct gssx_res_accept_sec_context *res = data;
u32 value_follows;
int err;
struct page *scratch;
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h
index 9d88c6239f01..146c31032917 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
@@ -179,10 +179,10 @@ struct gssx_res_accept_sec_context {
#define gssx_dec_init_sec_context NULL
void gssx_enc_accept_sec_context(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct gssx_arg_accept_sec_context *args);
+ const void *data);
int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
- struct gssx_res_accept_sec_context *res);
+ void *data);
#define gssx_enc_release_handle NULL
#define gssx_dec_release_handle NULL
#define gssx_enc_get_mic NULL
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index a54a7a3d28f5..7b1ee5a0b03c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -838,6 +838,14 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
struct xdr_netobj mic;
struct xdr_buf integ_buf;
+ /* NFS READ normally uses splice to send data in-place. However,

+ * the data in cache can change after the reply's MIC is computed
+ * but before the RPC reply is sent. To prevent the client from
+ * rejecting the server-computed MIC in this somewhat rare case,
+ * do not use splice with the GSS integrity service.
+ */
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+
/* Did we already verify the signature on the original pass through? */
if (rqstp->rq_deferred)
return 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b5cb921775a0..2e49d1f892b7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1517,14 +1517,16 @@ static void
call_start(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
+ int idx = task->tk_msg.rpc_proc->p_statidx;
dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
clnt->cl_program->name, clnt->cl_vers,
rpc_proc_name(task),
(RPC_IS_ASYNC(task) ? "async" : "sync"));
- /* Increment call count */
- task->tk_msg.rpc_proc->p_count++;
+ /* Increment call count (version might not be valid for ping) */
+ if (clnt->cl_program->version[clnt->cl_vers])
+ clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
clnt->cl_stats->rpccnt++;
task->tk_action = call_reserve;
}
@@ -1672,7 +1674,7 @@ call_allocate(struct rpc_task *task)
unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
+ const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
int status;
dprint_status(task);
@@ -2476,16 +2478,18 @@ out_overflow:
goto out_garbage;
}
-static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
+static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ const void *obj)
{
}
-static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
+static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *obj)
{
return 0;
}
-static struct rpc_procinfo rpcproc_null = {
+static const struct rpc_procinfo rpcproc_null = {
.p_encode = rpcproc_encode_null,
.p_decode = rpcproc_decode_null,
};
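The sunrpc changes in this and the following files share one goal: rpc_procinfo tables become const, which is only possible once the mutable p_count statistic moves out of them and into a per-version counts[] array indexed by p_statidx. The resulting layout, sketched with hypothetical names:

    static const struct rpc_procinfo my_procedures[] = {
    	/* ... read-only procedure descriptors ... */
    };

    /* mutable counters live beside, not inside, the const table */
    static unsigned int my_version1_counts[ARRAY_SIZE(my_procedures)];

    static const struct rpc_version my_version1 = {
    	.number  = 1,
    	.nrprocs = ARRAY_SIZE(my_procedures),
    	.procs   = my_procedures,
    	.counts  = my_version1_counts,
    };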
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 5b30603596d0..ea0676f199c8 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -128,13 +128,13 @@ struct rpcbind_args {
int r_status;
};
-static struct rpc_procinfo rpcb_procedures2[];
-static struct rpc_procinfo rpcb_procedures3[];
-static struct rpc_procinfo rpcb_procedures4[];
+static const struct rpc_procinfo rpcb_procedures2[];
+static const struct rpc_procinfo rpcb_procedures3[];
+static const struct rpc_procinfo rpcb_procedures4[];
struct rpcb_info {
u32 rpc_vers;
- struct rpc_procinfo * rpc_proc;
+ const struct rpc_procinfo *rpc_proc;
};
static const struct rpcb_info rpcb_next_version[];
@@ -620,7 +620,8 @@ int rpcb_v4_register(struct net *net, const u32 program, const u32 version,
return -EAFNOSUPPORT;
}
-static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbind_args *map, struct rpc_procinfo *proc)
+static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt,
+ struct rpcbind_args *map, const struct rpc_procinfo *proc)
{
struct rpc_message msg = {
.rpc_proc = proc,
@@ -671,7 +672,7 @@ static struct rpc_clnt *rpcb_find_transport_owner(struct rpc_clnt *clnt)
void rpcb_getport_async(struct rpc_task *task)
{
struct rpc_clnt *clnt;
- struct rpc_procinfo *proc;
+ const struct rpc_procinfo *proc;
u32 bind_version;
struct rpc_xprt *xprt;
struct rpc_clnt *rpcb_clnt;
@@ -843,8 +844,9 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
*/
static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct rpcbind_args *rpcb)
+ const void *data)
{
+ const struct rpcbind_args *rpcb = data;
__be32 *p;
dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
@@ -860,8 +862,9 @@ static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
}
static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct rpcbind_args *rpcb)
+ void *data)
{
+ struct rpcbind_args *rpcb = data;
unsigned long port;
__be32 *p;
@@ -882,8 +885,9 @@ static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
}
static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
- unsigned int *boolp)
+ void *data)
{
+ unsigned int *boolp = data;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
@@ -917,8 +921,9 @@ static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
}
static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
- const struct rpcbind_args *rpcb)
+ const void *data)
{
+ const struct rpcbind_args *rpcb = data;
__be32 *p;
dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
@@ -937,8 +942,9 @@ static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
}
static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct rpcbind_args *rpcb)
+ void *data)
{
+ struct rpcbind_args *rpcb = data;
struct sockaddr_storage address;
struct sockaddr *sap = (struct sockaddr *)&address;
__be32 *p;
@@ -989,11 +995,11 @@ out_fail:
* since the Linux kernel RPC code requires only these.
*/
-static struct rpc_procinfo rpcb_procedures2[] = {
+static const struct rpc_procinfo rpcb_procedures2[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdreproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrdproc_t)rpcb_dec_set,
+ .p_encode = rpcb_enc_mapping,
+ .p_decode = rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -1002,8 +1008,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdreproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrdproc_t)rpcb_dec_set,
+ .p_encode = rpcb_enc_mapping,
+ .p_decode = rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -1012,8 +1018,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_GETPORT] = {
.p_proc = RPCBPROC_GETPORT,
- .p_encode = (kxdreproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrdproc_t)rpcb_dec_getport,
+ .p_encode = rpcb_enc_mapping,
+ .p_decode = rpcb_dec_getport,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_getportres_sz,
.p_statidx = RPCBPROC_GETPORT,
@@ -1022,11 +1028,11 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
};
-static struct rpc_procinfo rpcb_procedures3[] = {
+static const struct rpc_procinfo rpcb_procedures3[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrdproc_t)rpcb_dec_set,
+ .p_encode = rpcb_enc_getaddr,
+ .p_decode = rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -1035,8 +1041,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrdproc_t)rpcb_dec_set,
+ .p_encode = rpcb_enc_getaddr,
+ .p_decode = rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -1045,8 +1051,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
+ .p_encode = rpcb_enc_getaddr,
+ .p_decode = rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -1055,11 +1061,11 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
};
-static struct rpc_procinfo rpcb_procedures4[] = {
+static const struct rpc_procinfo rpcb_procedures4[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrdproc_t)rpcb_dec_set,
+ .p_encode = rpcb_enc_getaddr,
+ .p_decode = rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -1068,8 +1074,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrdproc_t)rpcb_dec_set,
+ .p_encode = rpcb_enc_getaddr,
+ .p_decode = rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -1078,8 +1084,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
+ .p_encode = rpcb_enc_getaddr,
+ .p_decode = rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -1112,22 +1118,28 @@ static const struct rpcb_info rpcb_next_version6[] = {
},
};
+static unsigned int rpcb_version2_counts[ARRAY_SIZE(rpcb_procedures2)];
static const struct rpc_version rpcb_version2 = {
.number = RPCBVERS_2,
.nrprocs = ARRAY_SIZE(rpcb_procedures2),
- .procs = rpcb_procedures2
+ .procs = rpcb_procedures2,
+ .counts = rpcb_version2_counts,
};
+static unsigned int rpcb_version3_counts[ARRAY_SIZE(rpcb_procedures3)];
static const struct rpc_version rpcb_version3 = {
.number = RPCBVERS_3,
.nrprocs = ARRAY_SIZE(rpcb_procedures3),
- .procs = rpcb_procedures3
+ .procs = rpcb_procedures3,
+ .counts = rpcb_version3_counts,
};
+static unsigned int rpcb_version4_counts[ARRAY_SIZE(rpcb_procedures4)];
static const struct rpc_version rpcb_version4 = {
.number = RPCBVERS_4,
.nrprocs = ARRAY_SIZE(rpcb_procedures4),
- .procs = rpcb_procedures4
+ .procs = rpcb_procedures4,
+ .counts = rpcb_version4_counts,
};
static const struct rpc_version *rpcb_version[] = {
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index caeb01ad2b5a..1e671333c3d5 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -55,8 +55,7 @@ static int rpc_proc_show(struct seq_file *seq, void *v) {
seq_printf(seq, "proc%u %u",
vers->number, vers->nrprocs);
for (j = 0; j < vers->nrprocs; j++)
- seq_printf(seq, " %u",
- vers->procs[j].p_count);
+ seq_printf(seq, " %u", vers->counts[j]);
seq_putc(seq, '\n');
}
return 0;
@@ -78,9 +77,9 @@ static const struct file_operations rpc_proc_fops = {
/*
* Get RPC server stats
*/
-void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
+void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
+{
const struct svc_program *prog = statp->program;
- const struct svc_procedure *proc;
const struct svc_version *vers;
unsigned int i, j;
@@ -99,11 +98,12 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
statp->rpcbadclnt);
for (i = 0; i < prog->pg_nvers; i++) {
- if (!(vers = prog->pg_vers[i]) || !(proc = vers->vs_proc))
+ vers = prog->pg_vers[i];
+ if (!vers)
continue;
seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
- for (j = 0; j < vers->vs_nproc; j++, proc++)
- seq_printf(seq, " %u", proc->pc_count);
+ for (j = 0; j < vers->vs_nproc; j++)
+ seq_printf(seq, " %u", vers->vs_count[j]);
seq_putc(seq, '\n');
}
}
@@ -192,7 +192,7 @@ void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
EXPORT_SYMBOL_GPL(rpc_count_iostats);
static void _print_name(struct seq_file *seq, unsigned int op,
- struct rpc_procinfo *procs)
+ const struct rpc_procinfo *procs)
{
if (procs[op].p_name)
seq_printf(seq, "\t%12s: ", procs[op].p_name);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index bc0f5a0ecbdc..85ce0db5b0a6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1008,7 +1008,7 @@ int svc_register(const struct svc_serv *serv, struct net *net,
const unsigned short port)
{
struct svc_program *progp;
- struct svc_version *vers;
+ const struct svc_version *vers;
unsigned int i;
int error = 0;
@@ -1151,10 +1151,9 @@ static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
struct svc_program *progp;
- struct svc_version *versp = NULL; /* compiler food */
- struct svc_procedure *procp = NULL;
+ const struct svc_version *versp = NULL; /* compiler food */
+ const struct svc_procedure *procp = NULL;
struct svc_serv *serv = rqstp->rq_server;
- kxdrproc_t xdr;
__be32 *statp;
u32 prog, vers, proc;
__be32 auth_stat, rpc_stat;
@@ -1166,7 +1165,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
if (argv->iov_len < 6*4)
goto err_short_len;
- /* Will be turned off only in gss privacy case: */
+ /* Will be turned off by GSS integrity and privacy services */
set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* Will be turned off only when NFSv4 Sessions are used */
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
@@ -1262,7 +1261,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
svc_putnl(resv, RPC_SUCCESS);
/* Bump per-procedure stats counter */
- procp->pc_count++;
+ versp->vs_count[proc]++;
/* Initialize storage for argp and resp */
memset(rqstp->rq_argp, 0, procp->pc_argsize);
@@ -1276,28 +1275,30 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
/* Call the function that processes the request. */
if (!versp->vs_dispatch) {
- /* Decode arguments */
- xdr = procp->pc_decode;
- if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
+ /*
+ * Decode arguments
+ * XXX: why do we ignore the return value?
+ */
+ if (procp->pc_decode &&
+ !procp->pc_decode(rqstp, argv->iov_base))
goto err_garbage;
- *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
+ *statp = procp->pc_func(rqstp);
/* Encode reply */
if (*statp == rpc_drop_reply ||
test_bit(RQ_DROPME, &rqstp->rq_flags)) {
if (procp->pc_release)
- procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+ procp->pc_release(rqstp);
goto dropit;
}
if (*statp == rpc_autherr_badcred) {
if (procp->pc_release)
- procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+ procp->pc_release(rqstp);
goto err_bad_auth;
}
- if (*statp == rpc_success &&
- (xdr = procp->pc_encode) &&
- !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
+ if (*statp == rpc_success && procp->pc_encode &&
+ !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
dprintk("svc: failed to encode reply\n");
/* serv->sv_stats->rpcsystemerr++; */
*statp = rpc_system_err;
@@ -1307,7 +1308,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
if (!versp->vs_dispatch(rqstp, statp)) {
/* Release reply info */
if (procp->pc_release)
- procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+ procp->pc_release(rqstp);
goto dropit;
}
}
@@ -1318,7 +1319,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
/* Release reply info */
if (procp->pc_release)
- procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+ procp->pc_release(rqstp);
if (procp->pc_encode == NULL)
goto dropit;
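The svc_process_common() hunks track a parallel interface change: pc_func, pc_decode, pc_encode, and pc_release no longer take separate argument/result pointers; handlers reach their buffers through the svc_rqst itself. Roughly what a new-style handler looks like (types and names hypothetical):

    static __be32 my_proc_func(struct svc_rqst *rqstp)
    {
    	struct my_args *argp = rqstp->rq_argp;   /* decoded arguments */
    	struct my_res  *resp = rqstp->rq_resp;   /* results to encode */

    	/* ... service the call ... */
    	return rpc_success;
    }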
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 7bfe1fb42add..d16a8b423c20 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -659,11 +659,13 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
int i;
/* now allocate needed pages. If we get a failure, sleep briefly */
- pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
- WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
- if (pages >= RPCSVC_MAXPAGES)
+ pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
+ if (pages > RPCSVC_MAXPAGES) {
+ pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
+ pages, RPCSVC_MAXPAGES);
/* use as many pages as possible */
- pages = RPCSVC_MAXPAGES - 1;
+ pages = RPCSVC_MAXPAGES;
+ }
for (i = 0; i < pages ; i++)
while (rqstp->rq_pages[i] == NULL) {
struct page *p = alloc_page(GFP_KERNEL);
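The svc_alloc_arg() arithmetic above reserves two extra pages beyond the maximum message so the request header and tail always fit, and clamps to RPCSVC_MAXPAGES instead of one below it. A worked example, assuming 4 KiB pages and a 1 MiB sv_max_mesg:

    /* pages = (sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT
     *       = (1048576 + 8192) >> 12
     *       = 258   -- 256 payload pages + 1 head + 1 tail
     */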
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3e63c5e97ebe..4654a9934269 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1047,13 +1047,15 @@ out:
return ret;
}
-static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
+static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
struct rpc_rqst *req = ERR_PTR(-EAGAIN);
if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
goto out;
- req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
+ spin_unlock(&xprt->reserve_lock);
+ req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
+ spin_lock(&xprt->reserve_lock);
if (req != NULL)
goto out;
atomic_dec(&xprt->num_reqs);
@@ -1081,7 +1083,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
list_del(&req->rq_list);
goto out_init_req;
}
- req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
+ req = xprt_dynamic_alloc_slot(xprt);
if (!IS_ERR(req))
goto out_init_req;
switch (PTR_ERR(req)) {
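The xprt_dynamic_alloc_slot() change works because a GFP_NOFS allocation may sleep, and sleeping under the reserve_lock spinlock is forbidden; the slot count was already reserved atomically, so the lock can be dropped for the allocation and retaken afterwards. The pattern, sketched:

    if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
    	return ERR_PTR(-EAGAIN);         /* no slots left */

    spin_unlock(&xprt->reserve_lock);
    req = kzalloc(sizeof(*req), GFP_NOFS);   /* may sleep */
    spin_lock(&xprt->reserve_lock);
    if (!req)
    	atomic_dec(&xprt->num_reqs);     /* give the reservation back */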
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile
index c1ae8142ab73..b8213ddce2f2 100644
--- a/net/sunrpc/xprtrdma/Makefile
+++ b/net/sunrpc/xprtrdma/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_SUNRPC_XPRT_RDMA) += rpcrdma.o
rpcrdma-y := transport.o rpc_rdma.o verbs.o \
fmr_ops.o frwr_ops.o \
svc_rdma.o svc_rdma_backchannel.o svc_rdma_transport.o \
- svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o \
- svc_rdma_rw.o module.o
+ svc_rdma_sendto.o svc_rdma_recvfrom.o svc_rdma_rw.o \
+ module.o
rpcrdma-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel.o
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 59e64025ed96..d3f84bb1d443 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -91,7 +91,7 @@ __fmr_unmap(struct rpcrdma_mw *mw)
list_add(&mw->fmr.fm_mr->list, &l);
rc = ib_unmap_fmr(&l);
- list_del_init(&mw->fmr.fm_mr->list);
+ list_del(&mw->fmr.fm_mr->list);
return rc;
}
@@ -213,13 +213,11 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- mw->mw_nents = i;
mw->mw_dir = rpcrdma_data_dir(writing);
- if (i == 0)
- goto out_dmamap_err;
- if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir))
+ mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+ mw->mw_sg, i, mw->mw_dir);
+ if (!mw->mw_nents)
goto out_dmamap_err;
for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
@@ -237,16 +235,18 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
return mw->mw_nents;
out_dmamap_err:
- pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
- mw->mw_sg, mw->mw_nents);
- rpcrdma_defer_mr_recovery(mw);
+ pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
+ mw->mw_sg, i);
+ rpcrdma_put_mw(r_xprt, mw);
return -EIO;
out_maperr:
pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
len, (unsigned long long)dma_pages[0],
pageoff, mw->mw_nents, rc);
- rpcrdma_defer_mr_recovery(mw);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ rpcrdma_put_mw(r_xprt, mw);
return -EIO;
}
@@ -255,24 +255,26 @@ out_maperr:
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
*
- * Caller ensures that req->rl_registered is not empty.
+ * Caller ensures that @mws is not empty before the call. This
+ * function empties the list.
*/
static void
-fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
- struct rpcrdma_mw *mw, *tmp;
+ struct rpcrdma_mw *mw;
LIST_HEAD(unmap_list);
int rc;
- dprintk("RPC: %s: req %p\n", __func__, req);
-
/* ORDER: Invalidate all of the req's MRs first
*
* ib_unmap_fmr() is slow, so use a single call instead
* of one call per mapped FMR.
*/
- list_for_each_entry(mw, &req->rl_registered, mw_list)
+ list_for_each_entry(mw, mws, mw_list) {
+ dprintk("RPC: %s: unmapping fmr %p\n",
+ __func__, &mw->fmr);
list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
+ }
r_xprt->rx_stats.local_inv_needed++;
rc = ib_unmap_fmr(&unmap_list);
if (rc)
@@ -281,9 +283,11 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
/* ORDER: Now DMA unmap all of the req's MRs, and return
* them to the free MW list.
*/
- list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
- list_del_init(&mw->mw_list);
- list_del_init(&mw->fmr.fm_mr->list);
+ while (!list_empty(mws)) {
+ mw = rpcrdma_pop_mw(mws);
+ dprintk("RPC: %s: DMA unmapping fmr %p\n",
+ __func__, &mw->fmr);
+ list_del(&mw->fmr.fm_mr->list);
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
mw->mw_sg, mw->mw_nents, mw->mw_dir);
rpcrdma_put_mw(r_xprt, mw);
@@ -294,8 +298,9 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
out_reset:
pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
- list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
- list_del_init(&mw->fmr.fm_mr->list);
+ while (!list_empty(mws)) {
+ mw = rpcrdma_pop_mw(mws);
+ list_del(&mw->fmr.fm_mr->list);
fmr_op_recover_mr(mw);
}
}
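Both unmap_sync implementations now take a caller-owned list of MWs and must leave it empty, so the iteration style changes from walking req->rl_registered to draining the list one entry at a time. The drain pattern, sketched:

    while (!list_empty(mws)) {
    	mw = rpcrdma_pop_mw(mws);        /* detaches the first entry */
    	/* ... DMA unmap and recycle mw ... */
    	rpcrdma_put_mw(r_xprt, mw);
    }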
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index f81dd93176c0..6aea36a38bfd 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -277,7 +277,7 @@ __frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
}
/**
- * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
+ * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
@@ -298,7 +298,7 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
+ * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
@@ -319,7 +319,7 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
+ * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
@@ -355,7 +355,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
struct ib_mr *mr;
struct ib_reg_wr *reg_wr;
struct ib_send_wr *bad_wr;
- int rc, i, n, dma_nents;
+ int rc, i, n;
u8 key;
mw = NULL;
@@ -391,14 +391,10 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- mw->mw_nents = i;
mw->mw_dir = rpcrdma_data_dir(writing);
- if (i == 0)
- goto out_dmamap_err;
- dma_nents = ib_dma_map_sg(ia->ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- if (!dma_nents)
+ mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
+ if (!mw->mw_nents)
goto out_dmamap_err;
n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
@@ -436,13 +432,14 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
return mw->mw_nents;
out_dmamap_err:
- pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
- mw->mw_sg, mw->mw_nents);
- rpcrdma_defer_mr_recovery(mw);
+ pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
+ mw->mw_sg, i);
+ frmr->fr_state = FRMR_IS_INVALID;
+ rpcrdma_put_mw(r_xprt, mw);
return -EIO;
out_mapmr_err:
- pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
+ pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
frmr->fr_mr, n, mw->mw_nents);
rpcrdma_defer_mr_recovery(mw);
return -EIO;
@@ -458,21 +455,19 @@ out_senderr:
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
*
- * Caller ensures that req->rl_registered is not empty.
+ * Caller ensures that @mws is not empty before the call. This
+ * function empties the list.
*/
static void
-frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
struct ib_send_wr *first, **prev, *last, *bad_wr;
- struct rpcrdma_rep *rep = req->rl_reply;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct rpcrdma_frmr *f;
struct rpcrdma_mw *mw;
int count, rc;
- dprintk("RPC: %s: req %p\n", __func__, req);
-
- /* ORDER: Invalidate all of the req's MRs first
+ /* ORDER: Invalidate all of the MRs first
*
* Chain the LOCAL_INV Work Requests and post them with
* a single ib_post_send() call.
@@ -480,11 +475,10 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
f = NULL;
count = 0;
prev = &first;
- list_for_each_entry(mw, &req->rl_registered, mw_list) {
+ list_for_each_entry(mw, mws, mw_list) {
mw->frmr.fr_state = FRMR_IS_INVALID;
- if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
- (mw->mw_handle == rep->rr_inv_rkey))
+ if (mw->mw_flags & RPCRDMA_MW_F_RI)
continue;
f = &mw->frmr;
@@ -524,18 +518,19 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* unless ri_id->qp is a valid pointer.
*/
r_xprt->rx_stats.local_inv_needed++;
+ bad_wr = NULL;
rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
+ if (bad_wr != first)
+ wait_for_completion(&f->fr_linv_done);
if (rc)
goto reset_mrs;
- wait_for_completion(&f->fr_linv_done);
-
- /* ORDER: Now DMA unmap all of the req's MRs, and return
+ /* ORDER: Now DMA unmap all of the MRs, and return
* them to the free MW list.
*/
unmap:
- while (!list_empty(&req->rl_registered)) {
- mw = rpcrdma_pop_mw(&req->rl_registered);
+ while (!list_empty(mws)) {
+ mw = rpcrdma_pop_mw(mws);
dprintk("RPC: %s: DMA unmapping frmr %p\n",
__func__, &mw->frmr);
ib_dma_unmap_sg(ia->ri_device,
@@ -546,17 +541,19 @@ unmap:
reset_mrs:
pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
- rdma_disconnect(ia->ri_id);
/* Find and reset the MRs in the LOCAL_INV WRs that did not
- * get posted. This is synchronous, and slow.
+ * get posted.
*/
- list_for_each_entry(mw, &req->rl_registered, mw_list) {
- f = &mw->frmr;
- if (mw->mw_handle == bad_wr->ex.invalidate_rkey) {
- __frwr_reset_mr(ia, mw);
- bad_wr = bad_wr->next;
- }
+ rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
+ while (bad_wr) {
+ f = container_of(bad_wr, struct rpcrdma_frmr,
+ fr_invwr);
+ mw = container_of(f, struct rpcrdma_mw, frmr);
+
+ __frwr_reset_mr(ia, mw);
+
+ bad_wr = bad_wr->next;
}
goto unmap;
}
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 694e9b13ecf0..ca4d6e4528f3 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -141,7 +141,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
if (xdr->page_len) {
remaining = xdr->page_len;
- offset = xdr->page_base & ~PAGE_MASK;
+ offset = offset_in_page(xdr->page_base);
count = 0;
while (remaining) {
remaining -= min_t(unsigned int,
@@ -222,7 +222,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
len = xdrbuf->page_len;
ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
- page_base = xdrbuf->page_base & ~PAGE_MASK;
+ page_base = offset_in_page(xdrbuf->page_base);
p = 0;
while (len && n < RPCRDMA_MAX_SEGS) {
if (!ppages[p]) {
@@ -540,7 +540,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
goto out;
page = virt_to_page(xdr->tail[0].iov_base);
- page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
+ page_base = offset_in_page(xdr->tail[0].iov_base);
/* If the content in the page list is an odd length,
* xdr_write_pages() has added a pad at the beginning
@@ -557,7 +557,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
*/
if (xdr->page_len) {
ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
- page_base = xdr->page_base & ~PAGE_MASK;
+ page_base = offset_in_page(xdr->page_base);
remaining = xdr->page_len;
while (remaining) {
sge_no++;
@@ -587,7 +587,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
*/
if (xdr->tail[0].iov_len) {
page = virt_to_page(xdr->tail[0].iov_base);
- page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
+ page_base = offset_in_page(xdr->tail[0].iov_base);
len = xdr->tail[0].iov_len;
map_tail:
@@ -734,6 +734,9 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
rpclen = 0;
}
+ req->rl_xid = rqst->rq_xid;
+ rpcrdma_insert_req(&r_xprt->rx_buf, req);
+
/* This implementation supports the following combinations
* of chunk lists in one RPC-over-RDMA Call message:
*
@@ -875,9 +878,9 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
srcp += curlen;
copy_len -= curlen;
- page_base = rqst->rq_rcv_buf.page_base;
- ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
- page_base &= ~PAGE_MASK;
+ ppages = rqst->rq_rcv_buf.pages +
+ (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
+ page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
fixup_copy_count = 0;
if (copy_len && rqst->rq_rcv_buf.page_len) {
int pagelist_len;
@@ -928,6 +931,24 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
return fixup_copy_count;
}
+/* Caller must guarantee @rep remains stable during this call.
+ */
+static void
+rpcrdma_mark_remote_invalidation(struct list_head *mws,
+ struct rpcrdma_rep *rep)
+{
+ struct rpcrdma_mw *mw;
+
+ if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
+ return;
+
+ list_for_each_entry(mw, mws, mw_list)
+ if (mw->mw_handle == rep->rr_inv_rkey) {
+ mw->mw_flags = RPCRDMA_MW_F_RI;
+ break; /* only one invalidated MR per RPC */
+ }
+}
+
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
* messages, and never populate the chunk lists. This makes
@@ -969,14 +990,16 @@ rpcrdma_reply_handler(struct work_struct *work)
{
struct rpcrdma_rep *rep =
container_of(work, struct rpcrdma_rep, rr_work);
+ struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct rpcrdma_msg *headerp;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
- struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
- struct rpc_xprt *xprt = &r_xprt->rx_xprt;
__be32 *iptr;
int rdmalen, status, rmerr;
unsigned long cwnd;
+ struct list_head mws;
dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
@@ -994,27 +1017,45 @@ rpcrdma_reply_handler(struct work_struct *work)
/* Match incoming rpcrdma_rep to an rpcrdma_req to
* get context for handling any incoming chunks.
*/
- spin_lock_bh(&xprt->transport_lock);
- rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
- if (!rqst)
+ spin_lock(&buf->rb_lock);
+ req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf,
+ headerp->rm_xid);
+ if (!req)
goto out_nomatch;
-
- req = rpcr_to_rdmar(rqst);
if (req->rl_reply)
goto out_duplicate;
- /* Sanity checking has passed. We are now committed
- * to complete this transaction.
+ list_replace_init(&req->rl_registered, &mws);
+ rpcrdma_mark_remote_invalidation(&mws, rep);
+
+ /* Avoid races with signals and duplicate replies
+ * by marking this req as matched.
*/
- list_del_init(&rqst->rq_list);
- spin_unlock_bh(&xprt->transport_lock);
+ req->rl_reply = rep;
+ spin_unlock(&buf->rb_lock);
+
dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
__func__, rep, req, be32_to_cpu(headerp->rm_xid));
- /* from here on, the reply is no longer an orphan */
- req->rl_reply = rep;
- xprt->reestablish_timeout = 0;
+ /* Invalidate and unmap the data payloads before waking the
+ * waiting application. This guarantees the memory regions
+ * are properly fenced from the server before the application
+ * accesses the data. It also ensures proper send flow control:
+ * waking the next RPC waits until this RPC has relinquished
+ * all its Send Queue entries.
+ */
+ if (!list_empty(&mws))
+ r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws);
+ /* Perform XID lookup, reconstruction of the RPC reply, and
+ * RPC completion while holding the transport lock to ensure
+ * the rep, rqst, and rq_task pointers remain stable.
+ */
+ spin_lock_bh(&xprt->transport_lock);
+ rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
+ if (!rqst)
+ goto out_norqst;
+ xprt->reestablish_timeout = 0;
if (headerp->rm_vers != rpcrdma_version)
goto out_badversion;
@@ -1024,12 +1065,9 @@ rpcrdma_reply_handler(struct work_struct *work)
case rdma_msg:
/* never expect read chunks */
/* never expect reply chunks (two ways to check) */
- /* never expect write chunks without having offered RDMA */
if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
(headerp->rm_body.rm_chunks[1] == xdr_zero &&
- headerp->rm_body.rm_chunks[2] != xdr_zero) ||
- (headerp->rm_body.rm_chunks[1] != xdr_zero &&
- list_empty(&req->rl_registered)))
+ headerp->rm_body.rm_chunks[2] != xdr_zero))
goto badheader;
if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
/* count any expected write chunks in read reply */
@@ -1066,8 +1104,7 @@ rpcrdma_reply_handler(struct work_struct *work)
/* never expect read or write chunks, always reply chunks */
if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
headerp->rm_body.rm_chunks[1] != xdr_zero ||
- headerp->rm_body.rm_chunks[2] != xdr_one ||
- list_empty(&req->rl_registered))
+ headerp->rm_body.rm_chunks[2] != xdr_one)
goto badheader;
iptr = (__be32 *)((unsigned char *)headerp +
RPCRDMA_HDRLEN_MIN);
@@ -1093,17 +1130,6 @@ badheader:
}
out:
- /* Invalidate and flush the data payloads before waking the
- * waiting application. This guarantees the memory region is
- * properly fenced from the server before the application
- * accesses the data. It also ensures proper send flow
- * control: waking the next RPC waits until this RPC has
- * relinquished all its Send Queue entries.
- */
- if (!list_empty(&req->rl_registered))
- r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
-
- spin_lock_bh(&xprt->transport_lock);
cwnd = xprt->cwnd;
xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
if (xprt->cwnd > cwnd)
@@ -1112,7 +1138,7 @@ out:
xprt_complete_rqst(rqst->rq_task, status);
spin_unlock_bh(&xprt->transport_lock);
dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
- __func__, xprt, rqst, status);
+ __func__, xprt, rqst, status);
return;
out_badstatus:
@@ -1161,26 +1187,37 @@ out_rdmaerr:
r_xprt->rx_stats.bad_reply_count++;
goto out;
-/* If no pending RPC transaction was matched, post a replacement
- * receive buffer before returning.
+/* The req was still available, but by the time the transport_lock
+ * was acquired, the rqst and task had been released. Thus the RPC
+ * has already been terminated.
*/
+out_norqst:
+ spin_unlock_bh(&xprt->transport_lock);
+ rpcrdma_buffer_put(req);
+ dprintk("RPC: %s: race, no rqst left for req %p\n",
+ __func__, req);
+ return;
+
out_shortreply:
dprintk("RPC: %s: short/invalid reply\n", __func__);
goto repost;
out_nomatch:
- spin_unlock_bh(&xprt->transport_lock);
+ spin_unlock(&buf->rb_lock);
dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
__func__, be32_to_cpu(headerp->rm_xid),
rep->rr_len);
goto repost;
out_duplicate:
- spin_unlock_bh(&xprt->transport_lock);
+ spin_unlock(&buf->rb_lock);
dprintk("RPC: %s: "
"duplicate reply %p to RPC request %p: xid 0x%08x\n",
__func__, rep, req, be32_to_cpu(headerp->rm_xid));
+/* If no pending RPC transaction was matched, post a replacement
+ * receive buffer before returning.
+ */
repost:
r_xprt->rx_stats.bad_reply_count++;
if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
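The repeated rpc_rdma.c substitutions are a pure readability change: offset_in_page() (from linux/mm.h) is defined as the masked low bits of an address, so it is exactly the open-coded expression it replaces:

    /* from include/linux/mm.h */
    #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)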
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
deleted file mode 100644
index bdcf7d85a3dc..000000000000
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2016 Oracle. All rights reserved.
- * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the BSD-type
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * Neither the name of the Network Appliance, Inc. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Tom Tucker <tom@opengridcomputing.com>
- */
-
-#include <linux/sunrpc/xdr.h>
-#include <linux/sunrpc/debug.h>
-#include <asm/unaligned.h>
-#include <linux/sunrpc/rpc_rdma.h>
-#include <linux/sunrpc/svc_rdma.h>
-
-#define RPCDBG_FACILITY RPCDBG_SVCXPRT
-
-static __be32 *xdr_check_read_list(__be32 *p, __be32 *end)
-{
- __be32 *next;
-
- while (*p++ != xdr_zero) {
- next = p + rpcrdma_readchunk_maxsz - 1;
- if (next > end)
- return NULL;
- p = next;
- }
- return p;
-}
-
-static __be32 *xdr_check_write_list(__be32 *p, __be32 *end)
-{
- __be32 *next;
-
- while (*p++ != xdr_zero) {
- next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
- if (next > end)
- return NULL;
- p = next;
- }
- return p;
-}
-
-static __be32 *xdr_check_reply_chunk(__be32 *p, __be32 *end)
-{
- __be32 *next;
-
- if (*p++ != xdr_zero) {
- next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
- if (next > end)
- return NULL;
- p = next;
- }
- return p;
-}
-
-/**
- * svc_rdma_xdr_decode_req - Parse incoming RPC-over-RDMA header
- * @rq_arg: Receive buffer
- *
- * On entry, xdr->head[0].iov_base points to first byte in the
- * RPC-over-RDMA header.
- *
- * On successful exit, head[0] points to first byte past the
- * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
- * The length of the RPC-over-RDMA header is returned.
- */
-int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
-{
- __be32 *p, *end, *rdma_argp;
- unsigned int hdr_len;
-
- /* Verify that there's enough bytes for header + something */
- if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
- goto out_short;
-
- rdma_argp = rq_arg->head[0].iov_base;
- if (*(rdma_argp + 1) != rpcrdma_version)
- goto out_version;
-
- switch (*(rdma_argp + 3)) {
- case rdma_msg:
- case rdma_nomsg:
- break;
-
- case rdma_done:
- goto out_drop;
-
- case rdma_error:
- goto out_drop;
-
- default:
- goto out_proc;
- }
-
- end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
- p = xdr_check_read_list(rdma_argp + 4, end);
- if (!p)
- goto out_inval;
- p = xdr_check_write_list(p, end);
- if (!p)
- goto out_inval;
- p = xdr_check_reply_chunk(p, end);
- if (!p)
- goto out_inval;
- if (p > end)
- goto out_inval;
-
- rq_arg->head[0].iov_base = p;
- hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
- rq_arg->head[0].iov_len -= hdr_len;
- return hdr_len;
-
-out_short:
- dprintk("svcrdma: header too short = %d\n", rq_arg->len);
- return -EINVAL;
-
-out_version:
- dprintk("svcrdma: bad xprt version: %u\n",
- be32_to_cpup(rdma_argp + 1));
- return -EPROTONOSUPPORT;
-
-out_drop:
- dprintk("svcrdma: dropping RDMA_DONE/ERROR message\n");
- return 0;
-
-out_proc:
- dprintk("svcrdma: bad rdma procedure (%u)\n",
- be32_to_cpup(rdma_argp + 3));
- return -EINVAL;
-
-out_inval:
- dprintk("svcrdma: failed to parse transport header\n");
- return -EINVAL;
-}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 27a99bf5b1a6..ad4bd62eebf1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016, 2017 Oracle. All rights reserved.
* Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
* Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
*
@@ -40,12 +41,66 @@
* Author: Tom Tucker <tom@opengridcomputing.com>
*/
-#include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/rpc_rdma.h>
-#include <linux/spinlock.h>
+/* Operation
+ *
+ * The main entry point is svc_rdma_recvfrom. This is called from
+ * svc_recv when the transport indicates there is incoming data to
+ * be read. "Data Ready" is signaled when an RDMA Receive completes,
+ * or when a set of RDMA Reads complete.
+ *
+ * An svc_rqst is passed in. This structure contains an array of
+ * free pages (rq_pages) that will contain the incoming RPC message.
+ *
+ * Short messages are moved directly into svc_rqst::rq_arg, and
+ * the RPC Call is ready to be processed by the Upper Layer.
+ * svc_rdma_recvfrom returns the length of the RPC Call message,
+ * completing the reception of the RPC Call.
+ *
+ * However, when an incoming message has Read chunks,
+ * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
+ * data payload from the client. svc_rdma_recvfrom sets up the
+ * RDMA Reads using pages in svc_rqst::rq_pages, which are
+ * transferred to an svc_rdma_op_ctxt for the duration of the
+ * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
+ * is not yet ready.
+ *
+ * When the Read chunk payloads have become available on the
+ * server, "Data Ready" is raised again, and svc_recv calls
+ * svc_rdma_recvfrom again. This second call may use a different
+ * svc_rqst than the first one, so any information that needs
+ * to be preserved across these two calls is kept in an
+ * svc_rdma_op_ctxt.
+ *
+ * The second call to svc_rdma_recvfrom performs final assembly
+ * of the RPC Call message, using the RDMA Read sink pages kept in
+ * the svc_rdma_op_ctxt. The xdr_buf is copied from the
+ * svc_rdma_op_ctxt to the second svc_rqst. The second call returns
+ * the length of the completed RPC Call message.
+ *
+ * Page Management
+ *
+ * Pages under I/O must be transferred from the first svc_rqst to an
+ * svc_rdma_op_ctxt before the first svc_rdma_recvfrom call returns.
+ *
+ * The first svc_rqst supplies pages for RDMA Reads. These are moved
+ * from rqstp::rq_pages into ctxt::pages. The consumed elements of
+ * the rq_pages array are set to NULL and are refilled after the first
+ * svc_rdma_recvfrom call returns.
+ *
+ * During the second svc_rdma_recvfrom call, RDMA Read sink pages
+ * are transferred from the svc_rdma_op_ctxt to the second svc_rqst
+ * (see rdma_read_complete() below).
+ */
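For illustration, a minimal sketch of the two-call flow described above, as seen from the caller's side (hypothetical helper, not part of this patch; svc_recv's real logic is more involved):

	/* Hedged sketch: how a caller might consume the two phases */
	static int example_consume_call(struct svc_rqst *rqstp)
	{
		int len = svc_rdma_recvfrom(rqstp);

		if (len > 0)
			return len;	/* complete RPC Call, no Reads needed */
		if (len == 0)
			return -EAGAIN;	/* Reads may be in flight; a later
					 * "Data Ready" event re-invokes
					 * svc_rdma_recvfrom, possibly on a
					 * different svc_rqst */
		return len;		/* transport error */
	}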
+
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+
+#include <linux/spinlock.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/debug.h>
+#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -59,7 +114,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *ctxt,
u32 byte_count)
{
- struct rpcrdma_msg *rmsgp;
struct page *page;
u32 bc;
int sge_no;
@@ -83,20 +137,12 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
rqstp->rq_arg.page_len = bc;
rqstp->rq_arg.page_base = 0;
- /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
- rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
- if (rmsgp->rm_type == rdma_nomsg)
- rqstp->rq_arg.pages = &rqstp->rq_pages[0];
- else
- rqstp->rq_arg.pages = &rqstp->rq_pages[1];
-
sge_no = 1;
while (bc && sge_no < ctxt->count) {
page = ctxt->pages[sge_no];
put_page(rqstp->rq_pages[sge_no]);
rqstp->rq_pages[sge_no] = page;
bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
- rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
sge_no++;
}
rqstp->rq_respages = &rqstp->rq_pages[sge_no];
@@ -115,406 +161,208 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
rqstp->rq_arg.tail[0].iov_len = 0;
}
-/* Issue an RDMA_READ using the local lkey to map the data sink */
-int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
- struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head,
- int *page_no,
- u32 *page_offset,
- u32 rs_handle,
- u32 rs_length,
- u64 rs_offset,
- bool last)
-{
- struct ib_rdma_wr read_wr;
- int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
- struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
- int ret, read, pno;
- u32 pg_off = *page_offset;
- u32 pg_no = *page_no;
-
- ctxt->direction = DMA_FROM_DEVICE;
- ctxt->read_hdr = head;
- pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
- read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
- rs_length);
-
- for (pno = 0; pno < pages_needed; pno++) {
- int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
-
- head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
- head->arg.page_len += len;
-
- head->arg.len += len;
- if (!pg_off)
- head->count++;
- rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
- rqstp->rq_next_page = rqstp->rq_respages + 1;
- ctxt->sge[pno].addr =
- ib_dma_map_page(xprt->sc_cm_id->device,
- head->arg.pages[pg_no], pg_off,
- PAGE_SIZE - pg_off,
- DMA_FROM_DEVICE);
- ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
- ctxt->sge[pno].addr);
- if (ret)
- goto err;
- svc_rdma_count_mappings(xprt, ctxt);
-
- ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
- ctxt->sge[pno].length = len;
- ctxt->count++;
-
- /* adjust offset and wrap to next page if needed */
- pg_off += len;
- if (pg_off == PAGE_SIZE) {
- pg_off = 0;
- pg_no++;
- }
- rs_length -= len;
- }
-
- if (last && rs_length == 0)
- set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
- else
- clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
-
- memset(&read_wr, 0, sizeof(read_wr));
- ctxt->cqe.done = svc_rdma_wc_read;
- read_wr.wr.wr_cqe = &ctxt->cqe;
- read_wr.wr.opcode = IB_WR_RDMA_READ;
- read_wr.wr.send_flags = IB_SEND_SIGNALED;
- read_wr.rkey = rs_handle;
- read_wr.remote_addr = rs_offset;
- read_wr.wr.sg_list = ctxt->sge;
- read_wr.wr.num_sge = pages_needed;
-
- ret = svc_rdma_send(xprt, &read_wr.wr);
- if (ret) {
- pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
- set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
- goto err;
- }
+/* This accommodates the largest possible Write chunk,
+ * in one segment.
+ */
+#define MAX_BYTES_WRITE_SEG ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))
- /* return current location in page array */
- *page_no = pg_no;
- *page_offset = pg_off;
- ret = read;
- atomic_inc(&rdma_stat_read);
- return ret;
- err:
- svc_rdma_unmap_dma(ctxt);
- svc_rdma_put_context(ctxt, 0);
- return ret;
-}
+/* This accommodates the largest possible Position-Zero
+ * Read chunk or Reply chunk, in one segment.
+ */
+#define MAX_BYTES_SPECIAL_SEG ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
-/* Issue an RDMA_READ using an FRMR to map the data sink */
-int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
- struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head,
- int *page_no,
- u32 *page_offset,
- u32 rs_handle,
- u32 rs_length,
- u64 rs_offset,
- bool last)
+/* Sanity check the Read list.
+ *
+ * Implementation limits:
+ * - This implementation supports only one Read chunk.
+ *
+ * Sanity checks:
+ * - Read list does not overflow buffer.
+ * - Segment size limited by largest NFS data payload.
+ *
+ * The segment count is limited to how many segments can
+ * fit in the transport header without overflowing the
+ * buffer. That's about 40 Read segments for a 1KB inline
+ * threshold.
+ *
+ * Returns pointer to the following Write list.
+ */
+static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
- struct ib_rdma_wr read_wr;
- struct ib_send_wr inv_wr;
- struct ib_reg_wr reg_wr;
- u8 key;
- int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
- struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
- struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
- int ret, read, pno, dma_nents, n;
- u32 pg_off = *page_offset;
- u32 pg_no = *page_no;
-
- if (IS_ERR(frmr))
- return -ENOMEM;
-
- ctxt->direction = DMA_FROM_DEVICE;
- ctxt->frmr = frmr;
- nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
- read = min_t(int, (nents << PAGE_SHIFT) - *page_offset, rs_length);
-
- frmr->direction = DMA_FROM_DEVICE;
- frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
- frmr->sg_nents = nents;
-
- for (pno = 0; pno < nents; pno++) {
- int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
-
- head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
- head->arg.page_len += len;
- head->arg.len += len;
- if (!pg_off)
- head->count++;
-
- sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
- len, pg_off);
-
- rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
- rqstp->rq_next_page = rqstp->rq_respages + 1;
-
- /* adjust offset and wrap to next page if needed */
- pg_off += len;
- if (pg_off == PAGE_SIZE) {
- pg_off = 0;
- pg_no++;
+ u32 position;
+ bool first;
+
+ first = true;
+ while (*p++ != xdr_zero) {
+ if (first) {
+ position = be32_to_cpup(p++);
+ first = false;
+ } else if (be32_to_cpup(p++) != position) {
+ return NULL;
}
- rs_length -= len;
- }
+ p++; /* handle */
+ if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
+ return NULL;
+ p += 2; /* offset */
- if (last && rs_length == 0)
- set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
- else
- clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
-
- dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
- frmr->sg, frmr->sg_nents,
- frmr->direction);
- if (!dma_nents) {
- pr_err("svcrdma: failed to dma map sg %p\n",
- frmr->sg);
- return -ENOMEM;
+ if (p > end)
+ return NULL;
}
+ return p;
+}
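As a reference for the pointer arithmetic above, each Read list entry occupies six XDR words. A sketch of the wire layout (hypothetical struct, for illustration only, not part of this patch):

	struct read_list_entry_sketch {
		__be32 discrim;		/* xdr_one = entry present, xdr_zero = end of list */
		__be32 position;	/* must match across all segments of the chunk */
		__be32 handle;		/* rkey, skipped with p++ */
		__be32 length;		/* bounded by MAX_BYTES_SPECIAL_SEG */
		__be32 offset[2];	/* 64-bit remote address, skipped with p += 2 */
	};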
- n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
- if (unlikely(n != frmr->sg_nents)) {
- pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
- frmr->mr, n, frmr->sg_nents);
- return n < 0 ? n : -EINVAL;
- }
+/* The segment count is limited to how many segments can
+ * fit in the transport header without overflowing the
+ * buffer. That's about 60 Write segments for a 1KB inline
+ * threshold.
+ */
+static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
+ u32 maxlen)
+{
+ u32 i, segcount;
- /* Bump the key */
- key = (u8)(frmr->mr->lkey & 0x000000FF);
- ib_update_fast_reg_key(frmr->mr, ++key);
-
- ctxt->sge[0].addr = frmr->mr->iova;
- ctxt->sge[0].lkey = frmr->mr->lkey;
- ctxt->sge[0].length = frmr->mr->length;
- ctxt->count = 1;
- ctxt->read_hdr = head;
-
- /* Prepare REG WR */
- ctxt->reg_cqe.done = svc_rdma_wc_reg;
- reg_wr.wr.wr_cqe = &ctxt->reg_cqe;
- reg_wr.wr.opcode = IB_WR_REG_MR;
- reg_wr.wr.send_flags = IB_SEND_SIGNALED;
- reg_wr.wr.num_sge = 0;
- reg_wr.mr = frmr->mr;
- reg_wr.key = frmr->mr->lkey;
- reg_wr.access = frmr->access_flags;
- reg_wr.wr.next = &read_wr.wr;
-
- /* Prepare RDMA_READ */
- memset(&read_wr, 0, sizeof(read_wr));
- ctxt->cqe.done = svc_rdma_wc_read;
- read_wr.wr.wr_cqe = &ctxt->cqe;
- read_wr.wr.send_flags = IB_SEND_SIGNALED;
- read_wr.rkey = rs_handle;
- read_wr.remote_addr = rs_offset;
- read_wr.wr.sg_list = ctxt->sge;
- read_wr.wr.num_sge = 1;
- if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
- read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
- read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
- } else {
- read_wr.wr.opcode = IB_WR_RDMA_READ;
- read_wr.wr.next = &inv_wr;
- /* Prepare invalidate */
- memset(&inv_wr, 0, sizeof(inv_wr));
- ctxt->inv_cqe.done = svc_rdma_wc_inv;
- inv_wr.wr_cqe = &ctxt->inv_cqe;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
- inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
- }
+ segcount = be32_to_cpup(p++);
+ for (i = 0; i < segcount; i++) {
+ p++; /* handle */
+ if (be32_to_cpup(p++) > maxlen)
+ return NULL;
+ p += 2; /* offset */
- /* Post the chain */
- ret = svc_rdma_send(xprt, &reg_wr.wr);
- if (ret) {
- pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
- set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
- goto err;
+ if (p > end)
+ return NULL;
}
- /* return current location in page array */
- *page_no = pg_no;
- *page_offset = pg_off;
- ret = read;
- atomic_inc(&rdma_stat_read);
- return ret;
- err:
- svc_rdma_put_context(ctxt, 0);
- svc_rdma_put_frmr(xprt, frmr);
- return ret;
-}
-
-static unsigned int
-rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
-{
- unsigned int count;
-
- for (count = 0; ch->rc_discrim != xdr_zero; ch++)
- count++;
- return count;
+ return p;
}
-/* If there was additional inline content, append it to the end of arg.pages.
- * Tail copy has to be done after the reader function has determined how many
- * pages are needed for RDMA READ.
+/* Sanity check the Write list.
+ *
+ * Implementation limits:
+ * - This implementation supports only one Write chunk.
+ *
+ * Sanity checks:
+ * - Write list does not overflow buffer.
+ * - Segment size limited by largest NFS data payload.
+ *
+ * Returns pointer to the following Reply chunk.
*/
-static int
-rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
- u32 position, u32 byte_count, u32 page_offset, int page_no)
+static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
- char *srcp, *destp;
-
- srcp = head->arg.head[0].iov_base + position;
- byte_count = head->arg.head[0].iov_len - position;
- if (byte_count > PAGE_SIZE) {
- dprintk("svcrdma: large tail unsupported\n");
- return 0;
- }
-
- /* Fit as much of the tail on the current page as possible */
- if (page_offset != PAGE_SIZE) {
- destp = page_address(rqstp->rq_arg.pages[page_no]);
- destp += page_offset;
- while (byte_count--) {
- *destp++ = *srcp++;
- page_offset++;
- if (page_offset == PAGE_SIZE && byte_count)
- goto more;
- }
- goto done;
+ u32 chcount;
+
+ chcount = 0;
+ while (*p++ != xdr_zero) {
+ p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
+ if (!p)
+ return NULL;
+ if (chcount++ > 1)
+ return NULL;
}
-
-more:
- /* Fit the rest on the next page */
- page_no++;
- destp = page_address(rqstp->rq_arg.pages[page_no]);
- while (byte_count--)
- *destp++ = *srcp++;
-
- rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
- rqstp->rq_next_page = rqstp->rq_respages + 1;
-
-done:
- byte_count = head->arg.head[0].iov_len - position;
- head->arg.page_len += byte_count;
- head->arg.len += byte_count;
- head->arg.buflen += byte_count;
- return 1;
+ return p;
}
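A Write chunk, by contrast, is a counted array rather than a discriminated list of entries. A sketch of the layout xdr_check_write_chunk() walks (illustration only, not part of this patch):

	/* __be32 segcount;                              N
	 * struct { handle; length; offset[2]; } seg[N];
	 *
	 * The per-chunk discriminator word (xdr_one/xdr_zero) belongs to
	 * the enclosing Write list and is consumed by the caller before
	 * xdr_check_write_chunk() sees the chunk.
	 */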
-/* Returns the address of the first read chunk or <nul> if no read chunk
- * is present
+/* Sanity check the Reply chunk.
+ *
+ * Sanity checks:
+ * - Reply chunk does not overflow buffer.
+ * - Segment size limited by largest NFS data payload.
+ *
+ * Returns pointer to the following RPC header.
*/
-static struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
- struct rpcrdma_read_chunk *ch =
- (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-
- if (ch->rc_discrim == xdr_zero)
- return NULL;
- return ch;
+ if (*p++ != xdr_zero) {
+ p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
+ if (!p)
+ return NULL;
+ }
+ return p;
}
-static int rdma_read_chunks(struct svcxprt_rdma *xprt,
- struct rpcrdma_msg *rmsgp,
- struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head)
+/* On entry, xdr->head[0].iov_base points to first byte in the
+ * RPC-over-RDMA header.
+ *
+ * On successful exit, head[0] points to first byte past the
+ * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
+ * The length of the RPC-over-RDMA header is returned.
+ *
+ * Assumptions:
+ * - The transport header is entirely contained in the head iovec.
+ */
+static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
- int page_no, ret;
- struct rpcrdma_read_chunk *ch;
- u32 handle, page_offset, byte_count;
- u32 position;
- u64 rs_offset;
- bool last;
-
- /* If no read list is present, return 0 */
- ch = svc_rdma_get_read_chunk(rmsgp);
- if (!ch)
- return 0;
+ __be32 *p, *end, *rdma_argp;
+ unsigned int hdr_len;
+ char *proc;
+
+ /* Verify that there are enough bytes for header + something */
+ if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
+ goto out_short;
+
+ rdma_argp = rq_arg->head[0].iov_base;
+ if (*(rdma_argp + 1) != rpcrdma_version)
+ goto out_version;
+
+ switch (*(rdma_argp + 3)) {
+ case rdma_msg:
+ proc = "RDMA_MSG";
+ break;
+ case rdma_nomsg:
+ proc = "RDMA_NOMSG";
+ break;
+
+ case rdma_done:
+ goto out_drop;
- if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
- return -EINVAL;
-
- /* The request is completed when the RDMA_READs complete. The
- * head context keeps all the pages that comprise the
- * request.
- */
- head->arg.head[0] = rqstp->rq_arg.head[0];
- head->arg.tail[0] = rqstp->rq_arg.tail[0];
- head->hdr_count = head->count;
- head->arg.page_base = 0;
- head->arg.page_len = 0;
- head->arg.len = rqstp->rq_arg.len;
- head->arg.buflen = rqstp->rq_arg.buflen;
-
- /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
- position = be32_to_cpu(ch->rc_position);
- if (position == 0) {
- head->arg.pages = &head->pages[0];
- page_offset = head->byte_len;
- } else {
- head->arg.pages = &head->pages[head->count];
- page_offset = 0;
- }
+ case rdma_error:
+ goto out_drop;
- ret = 0;
- page_no = 0;
- for (; ch->rc_discrim != xdr_zero; ch++) {
- if (be32_to_cpu(ch->rc_position) != position)
- goto err;
-
- handle = be32_to_cpu(ch->rc_target.rs_handle),
- byte_count = be32_to_cpu(ch->rc_target.rs_length);
- xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
- &rs_offset);
-
- while (byte_count > 0) {
- last = (ch + 1)->rc_discrim == xdr_zero;
- ret = xprt->sc_reader(xprt, rqstp, head,
- &page_no, &page_offset,
- handle, byte_count,
- rs_offset, last);
- if (ret < 0)
- goto err;
- byte_count -= ret;
- rs_offset += ret;
- head->arg.buflen += ret;
- }
+ default:
+ goto out_proc;
}
- /* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
- if (page_offset & 3) {
- u32 pad = 4 - (page_offset & 3);
-
- head->arg.tail[0].iov_len += pad;
- head->arg.len += pad;
- head->arg.buflen += pad;
- page_offset += pad;
- }
+ end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
+ p = xdr_check_read_list(rdma_argp + 4, end);
+ if (!p)
+ goto out_inval;
+ p = xdr_check_write_list(p, end);
+ if (!p)
+ goto out_inval;
+ p = xdr_check_reply_chunk(p, end);
+ if (!p)
+ goto out_inval;
+ if (p > end)
+ goto out_inval;
+
+ rq_arg->head[0].iov_base = p;
+ hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
+ rq_arg->head[0].iov_len -= hdr_len;
+ rq_arg->len -= hdr_len;
+ dprintk("svcrdma: received %s request for XID 0x%08x, hdr_len=%u\n",
+ proc, be32_to_cpup(rdma_argp), hdr_len);
+ return hdr_len;
+
+out_short:
+ dprintk("svcrdma: header too short = %d\n", rq_arg->len);
+ return -EINVAL;
+
+out_version:
+ dprintk("svcrdma: bad xprt version: %u\n",
+ be32_to_cpup(rdma_argp + 1));
+ return -EPROTONOSUPPORT;
- ret = 1;
- if (position && position < head->arg.head[0].iov_len)
- ret = rdma_copy_tail(rqstp, head, position,
- byte_count, page_offset, page_no);
- head->arg.head[0].iov_len = position;
- head->position = position;
+out_drop:
+ dprintk("svcrdma: dropping RDMA_DONE/ERROR message\n");
+ return 0;
- err:
- /* Detach arg pages. svc_recv will replenish them */
- for (page_no = 0;
- &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
- rqstp->rq_pages[page_no] = NULL;
+out_proc:
+ dprintk("svcrdma: bad rdma procedure (%u)\n",
+ be32_to_cpup(rdma_argp + 3));
+ return -EINVAL;
- return ret;
+out_inval:
+ dprintk("svcrdma: failed to parse transport header\n");
+ return -EINVAL;
}
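For reference, the offsets used above come from the fixed portion of the RPC-over-RDMA transport header, which is four XDR words (summary only, not part of this patch):

	/* rdma_argp[0] : rdma_xid    (logged in the dprintk above)
	 * rdma_argp[1] : rdma_vers   (must equal rpcrdma_version)
	 * rdma_argp[2] : rdma_credit (flow-control credit value)
	 * rdma_argp[3] : rdma_proc   (rdma_msg, rdma_nomsg, ...)
	 *
	 * The chunk lists therefore begin at rdma_argp + 4.
	 */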
static void rdma_read_complete(struct svc_rqst *rqstp,
@@ -528,24 +376,9 @@ static void rdma_read_complete(struct svc_rqst *rqstp,
rqstp->rq_pages[page_no] = head->pages[page_no];
}
- /* Adjustments made for RDMA_NOMSG type requests */
- if (head->position == 0) {
- if (head->arg.len <= head->sge[0].length) {
- head->arg.head[0].iov_len = head->arg.len -
- head->byte_len;
- head->arg.page_len = 0;
- } else {
- head->arg.head[0].iov_len = head->sge[0].length -
- head->byte_len;
- head->arg.page_len = head->arg.len -
- head->sge[0].length;
- }
- }
-
/* Point rq_arg.pages past header */
rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
rqstp->rq_arg.page_len = head->arg.page_len;
- rqstp->rq_arg.page_base = head->arg.page_base;
/* rq_respages starts after the last arg page */
rqstp->rq_respages = &rqstp->rq_pages[page_no];
@@ -642,21 +475,44 @@ static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
return true;
}
-/*
- * Set up the rqstp thread context to point to the RQ buffer. If
- * necessary, pull additional data from the client with an RDMA_READ
- * request.
+/**
+ * svc_rdma_recvfrom - Receive an RPC call
+ * @rqstp: request structure into which to receive an RPC Call
+ *
+ * Returns:
+ * The positive number of bytes in the RPC Call message,
+ * %0 if there were no Calls ready to return,
+ * %-EINVAL if the Read chunk data is too large,
+ * %-ENOMEM if rdma_rw context pool was exhausted,
+ * %-ENOTCONN if posting failed (connection is lost),
+ * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
+ *
+ * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
+ * when there are no remaining ctxt's to process.
+ *
+ * The next ctxt is removed from the "receive" lists.
+ *
+ * - If the ctxt completes a Read, then finish assembling the Call
+ * message and return the number of bytes in the message.
+ *
+ * - If the ctxt completes a Receive, then construct the Call
+ * message from the contents of the Receive buffer.
+ *
+ * - If there are no Read chunks in this message, then finish
+ * assembling the Call message and return the number of bytes
+ * in the message.
+ *
+ * - If there are Read chunks in this message, post Read WRs to
+ * pull that payload and return 0.
*/
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
struct svc_xprt *xprt = rqstp->rq_xprt;
struct svcxprt_rdma *rdma_xprt =
container_of(xprt, struct svcxprt_rdma, sc_xprt);
- struct svc_rdma_op_ctxt *ctxt = NULL;
- struct rpcrdma_msg *rmsgp;
- int ret = 0;
-
- dprintk("svcrdma: rqstp=%p\n", rqstp);
+ struct svc_rdma_op_ctxt *ctxt;
+ __be32 *p;
+ int ret;
spin_lock(&rdma_xprt->sc_rq_dto_lock);
if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
@@ -671,22 +527,14 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
struct svc_rdma_op_ctxt, list);
list_del(&ctxt->list);
} else {
- atomic_inc(&rdma_stat_rq_starve);
+ /* No new incoming requests, terminate the loop */
clear_bit(XPT_DATA, &xprt->xpt_flags);
- ctxt = NULL;
+ spin_unlock(&rdma_xprt->sc_rq_dto_lock);
+ return 0;
}
spin_unlock(&rdma_xprt->sc_rq_dto_lock);
- if (!ctxt) {
- /* This is the EAGAIN path. The svc_recv routine will
- * return -EAGAIN, the nfsd thread will go to call into
- * svc_recv again and we shouldn't be on the active
- * transport list
- */
- if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
- goto defer;
- goto out;
- }
- dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
+
+ dprintk("svcrdma: recvfrom: ctxt=%p on xprt=%p, rqstp=%p\n",
ctxt, rdma_xprt, rqstp);
atomic_inc(&rdma_stat_recv);
@@ -694,7 +542,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
/* Decode the RDMA header. */
- rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
+ p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
if (ret < 0)
goto out_err;
@@ -702,9 +550,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
goto out_drop;
rqstp->rq_xprt_hlen = ret;
- if (svc_rdma_is_backchannel_reply(xprt, &rmsgp->rm_xid)) {
- ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt,
- &rmsgp->rm_xid,
+ if (svc_rdma_is_backchannel_reply(xprt, p)) {
+ ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
&rqstp->rq_arg);
svc_rdma_put_context(ctxt, 0);
if (ret)
@@ -712,39 +559,34 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
return ret;
}
- /* Read read-list data. */
- ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
- if (ret > 0) {
- /* read-list posted, defer until data received from client. */
- goto defer;
- } else if (ret < 0) {
- /* Post of read-list failed, free context. */
- svc_rdma_put_context(ctxt, 1);
- return 0;
- }
+ p += rpcrdma_fixed_maxsz;
+ if (*p != xdr_zero)
+ goto out_readchunk;
complete:
- ret = rqstp->rq_arg.head[0].iov_len
- + rqstp->rq_arg.page_len
- + rqstp->rq_arg.tail[0].iov_len;
svc_rdma_put_context(ctxt, 0);
- out:
- dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
- "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
- ret, rqstp->rq_arg.len,
- rqstp->rq_arg.head[0].iov_base,
- rqstp->rq_arg.head[0].iov_len);
+ dprintk("svcrdma: recvfrom: xprt=%p, rqstp=%p, rq_arg.len=%u\n",
+ rdma_xprt, rqstp, rqstp->rq_arg.len);
rqstp->rq_prot = IPPROTO_MAX;
svc_xprt_copy_addrs(rqstp, xprt);
- return ret;
+ return rqstp->rq_arg.len;
+
+out_readchunk:
+ ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
+ if (ret < 0)
+ goto out_postfail;
+ return 0;
out_err:
- svc_rdma_send_error(rdma_xprt, &rmsgp->rm_xid, ret);
+ svc_rdma_send_error(rdma_xprt, p, ret);
svc_rdma_put_context(ctxt, 0);
return 0;
-defer:
- return 0;
+out_postfail:
+ if (ret == -EINVAL)
+ svc_rdma_send_error(rdma_xprt, p, ret);
+ svc_rdma_put_context(ctxt, 1);
+ return ret;
out_drop:
svc_rdma_put_context(ctxt, 1);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 0cf620277693..933f79bed270 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -12,6 +12,9 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
+static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
+static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
+
/* Each R/W context contains state for one chain of RDMA Read or
* Write Work Requests.
*
@@ -113,22 +116,20 @@ struct svc_rdma_chunk_ctxt {
struct svcxprt_rdma *cc_rdma;
struct list_head cc_rwctxts;
int cc_sqecount;
- enum dma_data_direction cc_dir;
};
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
- struct svc_rdma_chunk_ctxt *cc,
- enum dma_data_direction dir)
+ struct svc_rdma_chunk_ctxt *cc)
{
cc->cc_rdma = rdma;
svc_xprt_get(&rdma->sc_xprt);
INIT_LIST_HEAD(&cc->cc_rwctxts);
cc->cc_sqecount = 0;
- cc->cc_dir = dir;
}
-static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc)
+static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
+ enum dma_data_direction dir)
{
struct svcxprt_rdma *rdma = cc->cc_rdma;
struct svc_rdma_rw_ctxt *ctxt;
@@ -138,7 +139,7 @@ static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc)
rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
rdma->sc_port_num, ctxt->rw_sg_table.sgl,
- ctxt->rw_nents, cc->cc_dir);
+ ctxt->rw_nents, dir);
svc_rdma_put_rw_ctxt(rdma, ctxt);
}
svc_xprt_put(&rdma->sc_xprt);
@@ -176,13 +177,14 @@ svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
info->wi_seg_no = 0;
info->wi_nsegs = be32_to_cpup(++chunk);
info->wi_segs = ++chunk;
- svc_rdma_cc_init(rdma, &info->wi_cc, DMA_TO_DEVICE);
+ svc_rdma_cc_init(rdma, &info->wi_cc);
+ info->wi_cc.cc_cqe.done = svc_rdma_write_done;
return info;
}
static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
- svc_rdma_cc_release(&info->wi_cc);
+ svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
kfree(info);
}
@@ -216,6 +218,76 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
svc_rdma_write_info_free(info);
}
+/* State for pulling a Read chunk.
+ */
+struct svc_rdma_read_info {
+ struct svc_rdma_op_ctxt *ri_readctxt;
+ unsigned int ri_position;
+ unsigned int ri_pageno;
+ unsigned int ri_pageoff;
+ unsigned int ri_chunklen;
+
+ struct svc_rdma_chunk_ctxt ri_cc;
+};
+
+static struct svc_rdma_read_info *
+svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
+{
+ struct svc_rdma_read_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return info;
+
+ svc_rdma_cc_init(rdma, &info->ri_cc);
+ info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
+ return info;
+}
+
+static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
+{
+ svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
+ kfree(info);
+}
+
+/**
+ * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
+ * @cq: controlling Completion Queue
+ * @wc: Work Completion
+ *
+ */
+static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct svc_rdma_chunk_ctxt *cc =
+ container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
+ struct svcxprt_rdma *rdma = cc->cc_rdma;
+ struct svc_rdma_read_info *info =
+ container_of(cc, struct svc_rdma_read_info, ri_cc);
+
+ atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
+ wake_up(&rdma->sc_send_wait);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
+ ib_wc_status_msg(wc->status),
+ wc->status, wc->vendor_err);
+ svc_rdma_put_context(info->ri_readctxt, 1);
+ } else {
+ spin_lock(&rdma->sc_rq_dto_lock);
+ list_add_tail(&info->ri_readctxt->list,
+ &rdma->sc_read_complete_q);
+ spin_unlock(&rdma->sc_rq_dto_lock);
+
+ set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
+ svc_xprt_enqueue(&rdma->sc_xprt);
+ }
+
+ svc_rdma_read_info_free(info);
+}
+
/* This function sleeps when the transport's Send Queue is congested.
*
* Assumptions:
@@ -232,6 +304,9 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
struct ib_cqe *cqe;
int ret;
+ if (cc->cc_sqecount > rdma->sc_sq_depth)
+ return -EINVAL;
+
first_wr = NULL;
cqe = &cc->cc_cqe;
list_for_each(tmp, &cc->cc_rwctxts) {
@@ -295,8 +370,9 @@ static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
struct scatterlist *sg;
struct page **page;
- page_off = (info->wi_next_off + xdr->page_base) & ~PAGE_MASK;
- page_no = (info->wi_next_off + xdr->page_base) >> PAGE_SHIFT;
+ page_off = info->wi_next_off + xdr->page_base;
+ page_no = page_off >> PAGE_SHIFT;
+ page_off = offset_in_page(page_off);
page = xdr->pages + page_no;
info->wi_next_off += remaining;
sg = ctxt->rw_sg_table.sgl;
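The refactored address math in a concrete case (numbers invented for illustration): with 4KB pages and info->wi_next_off + xdr->page_base = 9000, page_no = 9000 >> PAGE_SHIFT = 2 and page_off = offset_in_page(9000) = 808, the same result as the old "& ~PAGE_MASK" form, computed without forming the sum twice.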
@@ -332,7 +408,6 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
__be32 *seg;
int ret;
- cc->cc_cqe.done = svc_rdma_write_done;
seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
do {
unsigned int write_len;
@@ -425,6 +500,7 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
*
* Returns a non-negative number of bytes the chunk consumed, or
* %-E2BIG if the payload was larger than the Write chunk,
+ * %-EINVAL if client provided too many segments,
* %-ENOMEM if rdma_rw context pool was exhausted,
* %-ENOTCONN if posting failed (connection is lost),
* %-EIO if rdma_rw initialization failed (DMA mapping, etc).
@@ -465,6 +541,7 @@ out_err:
*
* Returns a non-negative number of bytes the chunk consumed, or
* %-E2BIG if the payload was larger than the Reply chunk,
+ * %-EINVAL if client provided too many segments,
* %-ENOMEM if rdma_rw context pool was exhausted,
* %-ENOTCONN if posting failed (connection is lost),
* %-EIO if rdma_rw initialization failed (DMA mapping, etc).
@@ -510,3 +587,353 @@ out_err:
svc_rdma_write_info_free(info);
return ret;
}
+
+static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
+ struct svc_rqst *rqstp,
+ u32 rkey, u32 len, u64 offset)
+{
+ struct svc_rdma_op_ctxt *head = info->ri_readctxt;
+ struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
+ struct svc_rdma_rw_ctxt *ctxt;
+ unsigned int sge_no, seg_len;
+ struct scatterlist *sg;
+ int ret;
+
+ sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
+ ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
+ if (!ctxt)
+ goto out_noctx;
+ ctxt->rw_nents = sge_no;
+
+ dprintk("svcrdma: reading segment %u@0x%016llx:0x%08x (%u sges)\n",
+ len, offset, rkey, sge_no);
+
+ sg = ctxt->rw_sg_table.sgl;
+ for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
+ seg_len = min_t(unsigned int, len,
+ PAGE_SIZE - info->ri_pageoff);
+
+ head->arg.pages[info->ri_pageno] =
+ rqstp->rq_pages[info->ri_pageno];
+ if (!info->ri_pageoff)
+ head->count++;
+
+ sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
+ seg_len, info->ri_pageoff);
+ sg = sg_next(sg);
+
+ info->ri_pageoff += seg_len;
+ if (info->ri_pageoff == PAGE_SIZE) {
+ info->ri_pageno++;
+ info->ri_pageoff = 0;
+ }
+ len -= seg_len;
+
+ /* Safety check */
+ if (len &&
+ &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
+ goto out_overrun;
+ }
+
+ ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
+ cc->cc_rdma->sc_port_num,
+ ctxt->rw_sg_table.sgl, ctxt->rw_nents,
+ 0, offset, rkey, DMA_FROM_DEVICE);
+ if (ret < 0)
+ goto out_initerr;
+
+ list_add(&ctxt->rw_list, &cc->cc_rwctxts);
+ cc->cc_sqecount += ret;
+ return 0;
+
+out_noctx:
+ dprintk("svcrdma: no R/W ctxs available\n");
+ return -ENOMEM;
+
+out_overrun:
+ dprintk("svcrdma: request overruns rq_pages\n");
+ return -EINVAL;
+
+out_initerr:
+ svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
+ pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
+ return -EIO;
+}
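A worked example of the sge-count math at the top of this function (numbers invented for illustration): with 4KB pages, info->ri_pageoff = 3000 and len = 5000 give PAGE_ALIGN(3000 + 5000) >> PAGE_SHIFT = 8192 >> 12 = 2 sges, which the loop then fills with 1096 bytes from the first page and 3904 bytes from the second.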
+
+static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
+ struct svc_rdma_read_info *info,
+ __be32 *p)
+{
+ int ret;
+
+ info->ri_chunklen = 0;
+ while (*p++ != xdr_zero) {
+ u32 rs_handle, rs_length;
+ u64 rs_offset;
+
+ if (be32_to_cpup(p++) != info->ri_position)
+ break;
+ rs_handle = be32_to_cpup(p++);
+ rs_length = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &rs_offset);
+
+ ret = svc_rdma_build_read_segment(info, rqstp,
+ rs_handle, rs_length,
+ rs_offset);
+ if (ret < 0)
+ break;
+
+ info->ri_chunklen += rs_length;
+ }
+
+ return ret;
+}
+
+/* If there is inline content following the Read chunk, append it to
+ * the page list immediately following the data payload. This has to
+ * be done after the reader function has determined how many pages
+ * were consumed for RDMA Read.
+ *
+ * On entry, ri_pageno and ri_pageoff point directly to the end of the
+ * page list. On exit, both have been updated to the new "next byte".
+ *
+ * Assumptions:
+ * - Inline content fits entirely in rq_pages[0]
+ * - Trailing content is only a handful of bytes
+ */
+static int svc_rdma_copy_tail(struct svc_rqst *rqstp,
+ struct svc_rdma_read_info *info)
+{
+ struct svc_rdma_op_ctxt *head = info->ri_readctxt;
+ unsigned int tail_length, remaining;
+ u8 *srcp, *destp;
+
+ /* Assert that all inline content fits in page 0. This is an
+ * implementation limit, not a protocol limit.
+ */
+ if (head->arg.head[0].iov_len > PAGE_SIZE) {
+ pr_warn_once("svcrdma: too much trailing inline content\n");
+ return -EINVAL;
+ }
+
+ srcp = head->arg.head[0].iov_base;
+ srcp += info->ri_position;
+ tail_length = head->arg.head[0].iov_len - info->ri_position;
+ remaining = tail_length;
+
+ /* If there is room on the last page in the page list, try to
+ * fit the trailing content there.
+ */
+ if (info->ri_pageoff > 0) {
+ unsigned int len;
+
+ len = min_t(unsigned int, remaining,
+ PAGE_SIZE - info->ri_pageoff);
+ destp = page_address(rqstp->rq_pages[info->ri_pageno]);
+ destp += info->ri_pageoff;
+
+ memcpy(destp, srcp, len);
+ srcp += len;
+ destp += len;
+ info->ri_pageoff += len;
+ remaining -= len;
+
+ if (info->ri_pageoff == PAGE_SIZE) {
+ info->ri_pageno++;
+ info->ri_pageoff = 0;
+ }
+ }
+
+ /* Otherwise, a fresh page is needed. */
+ if (remaining) {
+ head->arg.pages[info->ri_pageno] =
+ rqstp->rq_pages[info->ri_pageno];
+ head->count++;
+
+ destp = page_address(rqstp->rq_pages[info->ri_pageno]);
+ memcpy(destp, srcp, remaining);
+ info->ri_pageoff += remaining;
+ }
+
+ head->arg.page_len += tail_length;
+ head->arg.len += tail_length;
+ head->arg.buflen += tail_length;
+ return 0;
+}
+
+/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
+ * data lands in the page list of head->arg.pages.
+ *
+ * Currently NFSD does not look at the head->arg.tail[0] iovec.
+ * Therefore, XDR round-up of the Read chunk and trailing
+ * inline content must both be added at the end of the pagelist.
+ */
+static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
+ struct svc_rdma_read_info *info,
+ __be32 *p)
+{
+ struct svc_rdma_op_ctxt *head = info->ri_readctxt;
+ int ret;
+
+ dprintk("svcrdma: Reading Read chunk at position %u\n",
+ info->ri_position);
+
+ info->ri_pageno = head->hdr_count;
+ info->ri_pageoff = 0;
+
+ ret = svc_rdma_build_read_chunk(rqstp, info, p);
+ if (ret < 0)
+ goto out;
+
+ /* Read chunk may need XDR round-up (see RFC 5666, s. 3.7).
+ */
+ if (info->ri_chunklen & 3) {
+ u32 padlen = 4 - (info->ri_chunklen & 3);
+
+ info->ri_chunklen += padlen;
+
+ /* NB: data payload always starts on XDR alignment,
+ * thus the pad can never contain a page boundary.
+ */
+ info->ri_pageoff += padlen;
+ if (info->ri_pageoff == PAGE_SIZE) {
+ info->ri_pageno++;
+ info->ri_pageoff = 0;
+ }
+ }
+
+ head->arg.page_len = info->ri_chunklen;
+ head->arg.len += info->ri_chunklen;
+ head->arg.buflen += info->ri_chunklen;
+
+ if (info->ri_position < head->arg.head[0].iov_len) {
+ ret = svc_rdma_copy_tail(rqstp, info);
+ if (ret < 0)
+ goto out;
+ }
+ head->arg.head[0].iov_len = info->ri_position;
+
+out:
+ return ret;
+}
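The round-up arithmetic above in a concrete case (illustration only): a 5-byte chunk has info->ri_chunklen & 3 == 1, so padlen = 4 - 1 = 3 and the chunk is accounted as 8 bytes; a chunk whose length is already a multiple of 4 skips the branch entirely.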
+
+/* Construct RDMA Reads to pull over a Position Zero Read chunk.
+ * The start of the data lands in the first page just after
+ * the Transport header, and the rest lands in the page list of
+ * head->arg.pages.
+ *
+ * Assumptions:
+ * - A PZRC has an XDR-aligned length (no implicit round-up).
+ * - There can be no trailing inline content (IOW, we assume
+ * a PZRC is never sent in an RDMA_MSG message, though it's
+ * allowed by spec).
+ */
+static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
+ struct svc_rdma_read_info *info,
+ __be32 *p)
+{
+ struct svc_rdma_op_ctxt *head = info->ri_readctxt;
+ int ret;
+
+ dprintk("svcrdma: Reading Position Zero Read chunk\n");
+
+ info->ri_pageno = head->hdr_count - 1;
+ info->ri_pageoff = offset_in_page(head->byte_len);
+
+ ret = svc_rdma_build_read_chunk(rqstp, info, p);
+ if (ret < 0)
+ goto out;
+
+ head->arg.len += info->ri_chunklen;
+ head->arg.buflen += info->ri_chunklen;
+
+ if (head->arg.buflen <= head->sge[0].length) {
+ /* Transport header and RPC message fit entirely
+ * in page where head iovec resides.
+ */
+ head->arg.head[0].iov_len = info->ri_chunklen;
+ } else {
+ /* Transport header and part of RPC message reside
+ * in the head iovec's page.
+ */
+ head->arg.head[0].iov_len =
+ head->sge[0].length - head->byte_len;
+ head->arg.page_len =
+ info->ri_chunklen - head->arg.head[0].iov_len;
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
+ * @rdma: controlling RDMA transport
+ * @rqstp: set of pages to use as Read sink buffers
+ * @head: pages under I/O collect here
+ * @p: pointer to start of Read chunk
+ *
+ * Returns:
+ * %0 if all needed RDMA Reads were posted successfully,
+ * %-EINVAL if client provided too many segments,
+ * %-ENOMEM if rdma_rw context pool was exhausted,
+ * %-ENOTCONN if posting failed (connection is lost),
+ * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
+ *
+ * Assumptions:
+ * - All Read segments in @p have the same Position value.
+ */
+int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
+ struct svc_rdma_op_ctxt *head, __be32 *p)
+{
+ struct svc_rdma_read_info *info;
+ struct page **page;
+ int ret;
+
+ /* The request (with page list) is constructed in
+ * head->arg. Pages involved with RDMA Read I/O are
+ * transferred there.
+ */
+ head->hdr_count = head->count;
+ head->arg.head[0] = rqstp->rq_arg.head[0];
+ head->arg.tail[0] = rqstp->rq_arg.tail[0];
+ head->arg.pages = head->pages;
+ head->arg.page_base = 0;
+ head->arg.page_len = 0;
+ head->arg.len = rqstp->rq_arg.len;
+ head->arg.buflen = rqstp->rq_arg.buflen;
+
+ info = svc_rdma_read_info_alloc(rdma);
+ if (!info)
+ return -ENOMEM;
+ info->ri_readctxt = head;
+
+ info->ri_position = be32_to_cpup(p + 1);
+ if (info->ri_position)
+ ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
+ else
+ ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
+
+ /* Mark the start of the pages that can be used for the reply */
+ if (info->ri_pageoff > 0)
+ info->ri_pageno++;
+ rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
+ if (ret < 0)
+ goto out;
+
+ ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
+
+out:
+ /* Read sink pages have been moved from rqstp->rq_pages to
+ * head->arg.pages. Force svc_recv to refill those slots
+ * in rq_pages.
+ */
+ for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
+ *page = NULL;
+
+ if (ret < 0)
+ svc_rdma_read_info_free(info);
+ return ret;
+}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 1736337f3a55..7c3a211e0e9a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -313,13 +313,17 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
dma_addr = ib_dma_map_page(dev, virt_to_page(base),
offset, len, DMA_TO_DEVICE);
if (ib_dma_mapping_error(dev, dma_addr))
- return -EIO;
+ goto out_maperr;
ctxt->sge[sge_no].addr = dma_addr;
ctxt->sge[sge_no].length = len;
ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
svc_rdma_count_mappings(rdma, ctxt);
return 0;
+
+out_maperr:
+ pr_err("svcrdma: failed to map buffer\n");
+ return -EIO;
}
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
@@ -334,13 +338,17 @@ static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
if (ib_dma_mapping_error(dev, dma_addr))
- return -EIO;
+ goto out_maperr;
ctxt->sge[sge_no].addr = dma_addr;
ctxt->sge[sge_no].length = len;
ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
svc_rdma_count_mappings(rdma, ctxt);
return 0;
+
+out_maperr:
+ pr_err("svcrdma: failed to map page\n");
+ return -EIO;
}
/**
@@ -547,7 +555,6 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
return 0;
err:
- pr_err("svcrdma: failed to post Send WR (%d)\n", ret);
svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1);
return ret;
@@ -677,7 +684,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
return 0;
err2:
- if (ret != -E2BIG)
+ if (ret != -E2BIG && ret != -EINVAL)
goto err1;
ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index a9d9cb1ba4c6..e660d4965b18 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -202,7 +202,6 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
out:
ctxt->count = 0;
ctxt->mapped_sges = 0;
- ctxt->frmr = NULL;
return ctxt;
out_empty:
@@ -226,22 +225,13 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
struct svcxprt_rdma *xprt = ctxt->xprt;
struct ib_device *device = xprt->sc_cm_id->device;
- u32 lkey = xprt->sc_pd->local_dma_lkey;
unsigned int i;
- for (i = 0; i < ctxt->mapped_sges; i++) {
- /*
- * Unmap the DMA addr in the SGE if the lkey matches
- * the local_dma_lkey, otherwise, ignore it since it is
- * an FRMR lkey and will be unmapped later when the
- * last WR that uses it completes.
- */
- if (ctxt->sge[i].lkey == lkey)
- ib_dma_unmap_page(device,
- ctxt->sge[i].addr,
- ctxt->sge[i].length,
- ctxt->direction);
- }
+ for (i = 0; i < ctxt->mapped_sges; i++)
+ ib_dma_unmap_page(device,
+ ctxt->sge[i].addr,
+ ctxt->sge[i].length,
+ ctxt->direction);
ctxt->mapped_sges = 0;
}
@@ -346,36 +336,6 @@ out:
svc_xprt_put(&xprt->sc_xprt);
}
-static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
- struct ib_wc *wc,
- const char *opname)
-{
- if (wc->status != IB_WC_SUCCESS)
- goto err;
-
-out:
- atomic_inc(&xprt->sc_sq_avail);
- wake_up(&xprt->sc_send_wait);
- return;
-
-err:
- set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- pr_err("svcrdma: %s: %s (%u/0x%x)\n",
- opname, ib_wc_status_msg(wc->status),
- wc->status, wc->vendor_err);
- goto out;
-}
-
-static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
- const char *opname)
-{
- struct svcxprt_rdma *xprt = cq->cq_context;
-
- svc_rdma_send_wc_common(xprt, wc, opname);
- svc_xprt_put(&xprt->sc_xprt);
-}
-
/**
* svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
* @cq: completion queue
@@ -384,73 +344,28 @@ static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
*/
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
- struct ib_cqe *cqe = wc->wr_cqe;
- struct svc_rdma_op_ctxt *ctxt;
-
- svc_rdma_send_wc_common_put(cq, wc, "send");
-
- ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
- svc_rdma_unmap_dma(ctxt);
- svc_rdma_put_context(ctxt, 1);
-}
-
-/**
- * svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC
- * @cq: completion queue
- * @wc: completed WR
- *
- */
-void svc_rdma_wc_reg(struct ib_cq *cq, struct ib_wc *wc)
-{
- svc_rdma_send_wc_common_put(cq, wc, "fastreg");
-}
-
-/**
- * svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
- * @cq: completion queue
- * @wc: completed WR
- *
- */
-void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
-{
struct svcxprt_rdma *xprt = cq->cq_context;
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_op_ctxt *ctxt;
- svc_rdma_send_wc_common(xprt, wc, "read");
+ atomic_inc(&xprt->sc_sq_avail);
+ wake_up(&xprt->sc_send_wait);
ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
svc_rdma_unmap_dma(ctxt);
- svc_rdma_put_frmr(xprt, ctxt->frmr);
-
- if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
- struct svc_rdma_op_ctxt *read_hdr;
-
- read_hdr = ctxt->read_hdr;
- spin_lock(&xprt->sc_rq_dto_lock);
- list_add_tail(&read_hdr->list,
- &xprt->sc_read_complete_q);
- spin_unlock(&xprt->sc_rq_dto_lock);
+ svc_rdma_put_context(ctxt, 1);
- set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
- svc_xprt_enqueue(&xprt->sc_xprt);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ pr_err("svcrdma: Send: %s (%u/0x%x)\n",
+ ib_wc_status_msg(wc->status),
+ wc->status, wc->vendor_err);
}
- svc_rdma_put_context(ctxt, 0);
svc_xprt_put(&xprt->sc_xprt);
}
-/**
- * svc_rdma_wc_inv - Invoked by RDMA provider for each polled LOCAL_INV WC
- * @cq: completion queue
- * @wc: completed WR
- *
- */
-void svc_rdma_wc_inv(struct ib_cq *cq, struct ib_wc *wc)
-{
- svc_rdma_send_wc_common_put(cq, wc, "localInv");
-}
-
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
int listener)
{
@@ -462,14 +377,12 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
- INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
init_waitqueue_head(&cma_xprt->sc_send_wait);
spin_lock_init(&cma_xprt->sc_lock);
spin_lock_init(&cma_xprt->sc_rq_dto_lock);
- spin_lock_init(&cma_xprt->sc_frmr_q_lock);
spin_lock_init(&cma_xprt->sc_ctxt_lock);
spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);
@@ -780,86 +693,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
return ERR_PTR(ret);
}
-static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
-{
- struct ib_mr *mr;
- struct scatterlist *sg;
- struct svc_rdma_fastreg_mr *frmr;
- u32 num_sg;
-
- frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
- if (!frmr)
- goto err;
-
- num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
- mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
- if (IS_ERR(mr))
- goto err_free_frmr;
-
- sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
- if (!sg)
- goto err_free_mr;
-
- sg_init_table(sg, RPCSVC_MAXPAGES);
-
- frmr->mr = mr;
- frmr->sg = sg;
- INIT_LIST_HEAD(&frmr->frmr_list);
- return frmr;
-
- err_free_mr:
- ib_dereg_mr(mr);
- err_free_frmr:
- kfree(frmr);
- err:
- return ERR_PTR(-ENOMEM);
-}
-
-static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
-{
- struct svc_rdma_fastreg_mr *frmr;
-
- while (!list_empty(&xprt->sc_frmr_q)) {
- frmr = list_entry(xprt->sc_frmr_q.next,
- struct svc_rdma_fastreg_mr, frmr_list);
- list_del_init(&frmr->frmr_list);
- kfree(frmr->sg);
- ib_dereg_mr(frmr->mr);
- kfree(frmr);
- }
-}
-
-struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
-{
- struct svc_rdma_fastreg_mr *frmr = NULL;
-
- spin_lock(&rdma->sc_frmr_q_lock);
- if (!list_empty(&rdma->sc_frmr_q)) {
- frmr = list_entry(rdma->sc_frmr_q.next,
- struct svc_rdma_fastreg_mr, frmr_list);
- list_del_init(&frmr->frmr_list);
- frmr->sg_nents = 0;
- }
- spin_unlock(&rdma->sc_frmr_q_lock);
- if (frmr)
- return frmr;
-
- return rdma_alloc_frmr(rdma);
-}
-
-void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
- struct svc_rdma_fastreg_mr *frmr)
-{
- if (frmr) {
- ib_dma_unmap_sg(rdma->sc_cm_id->device,
- frmr->sg, frmr->sg_nents, frmr->direction);
- spin_lock(&rdma->sc_frmr_q_lock);
- WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
- list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
- spin_unlock(&rdma->sc_frmr_q_lock);
- }
-}
-
/*
* This is the xpo_recvfrom function for listening endpoints. Its
* purpose is to accept incoming connections. The CMA callback handler
@@ -908,8 +741,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
* capabilities of this particular device */
newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
(size_t)RPCSVC_MAXPAGES);
- newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
- RPCSVC_MAXPAGES);
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
svcrdma_max_requests);
@@ -952,7 +783,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.event_handler = qp_event_handler;
qp_attr.qp_context = &newxprt->sc_xprt;
- qp_attr.port_num = newxprt->sc_cm_id->port_num;
+ qp_attr.port_num = newxprt->sc_port_num;
qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests;
qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
@@ -976,47 +807,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
}
newxprt->sc_qp = newxprt->sc_cm_id->qp;
- /*
- * Use the most secure set of MR resources based on the
- * transport type and available memory management features in
- * the device. Here's the table implemented below:
- *
- * Fast Global DMA Remote WR
- * Reg LKEY MR Access
- * Sup'd Sup'd Needed Needed
- *
- * IWARP N N Y Y
- * N Y Y Y
- * Y N Y N
- * Y Y N -
- *
- * IB N N Y N
- * N Y N -
- * Y N Y N
- * Y Y N -
- *
- * NB: iWARP requires remote write access for the data sink
- * of an RDMA_READ. IB does not.
- */
- newxprt->sc_reader = rdma_read_chunk_lcl;
- if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- newxprt->sc_frmr_pg_list_len =
- dev->attrs.max_fast_reg_page_list_len;
- newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
- newxprt->sc_reader = rdma_read_chunk_frmr;
- } else
+ if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
newxprt->sc_snd_w_inv = false;
-
- /*
- * Determine if a DMA MR is required and if so, what privs are required
- */
- if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
- !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
+ if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
+ !rdma_ib_or_roce(dev, newxprt->sc_port_num))
goto errout;
- if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
- newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
-
/* Post receive buffers */
for (i = 0; i < newxprt->sc_max_requests; i++) {
ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
@@ -1056,7 +852,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
dprintk(" remote address : %pIS:%u\n", sap, rpc_get_port(sap));
dprintk(" max_sge : %d\n", newxprt->sc_max_sge);
- dprintk(" max_sge_rd : %d\n", newxprt->sc_max_sge_rd);
dprintk(" sq_depth : %d\n", newxprt->sc_sq_depth);
dprintk(" max_requests : %d\n", newxprt->sc_max_requests);
dprintk(" ord : %d\n", newxprt->sc_ord);
@@ -1117,12 +912,6 @@ static void __svc_rdma_free(struct work_struct *work)
pr_err("svcrdma: sc_xprt still in use? (%d)\n",
kref_read(&xprt->xpt_ref));
- /*
- * Destroy queued, but not processed read completions. Note
- * that this cleanup has to be done before destroying the
- * cm_id because the device ptr is needed to unmap the dma in
- * svc_rdma_put_context.
- */
while (!list_empty(&rdma->sc_read_complete_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&rdma->sc_read_complete_q,
@@ -1130,8 +919,6 @@ static void __svc_rdma_free(struct work_struct *work)
list_del(&ctxt->list);
svc_rdma_put_context(ctxt, 1);
}
-
- /* Destroy queued, but not processed recv completions */
while (!list_empty(&rdma->sc_rq_dto_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&rdma->sc_rq_dto_q,
@@ -1151,7 +938,6 @@ static void __svc_rdma_free(struct work_struct *work)
xprt->xpt_bc_xprt = NULL;
}
- rdma_dealloc_frmr_q(rdma);
svc_rdma_destroy_rw_ctxts(rdma);
svc_rdma_destroy_ctxts(rdma);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 62ecbccd9748..d1c458e5ec4d 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -684,7 +684,8 @@ xprt_rdma_free(struct rpc_task *task)
dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
- if (unlikely(!list_empty(&req->rl_registered)))
+ rpcrdma_remove_req(&r_xprt->rx_buf, req);
+ if (!list_empty(&req->rl_registered))
ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
rpcrdma_unmap_sges(ia, req);
rpcrdma_buffer_put(req);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3dbce9ac4327..e4171f2abe37 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -243,8 +243,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
- struct ib_qp_attr *attr = &ia->ri_qp_attr;
- struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
int connstate = 0;
switch (event->event) {
@@ -267,7 +265,8 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- pr_info("rpcrdma: removing device for %pIS:%u\n",
+ pr_info("rpcrdma: removing device %s for %pIS:%u\n",
+ ia->ri_device->name,
sap, rpc_get_port(sap));
#endif
set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
@@ -282,13 +281,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
return 1;
case RDMA_CM_EVENT_ESTABLISHED:
connstate = 1;
- ib_query_qp(ia->ri_id->qp, attr,
- IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
- iattr);
- dprintk("RPC: %s: %d responder resources"
- " (%d initiator)\n",
- __func__, attr->max_dest_rd_atomic,
- attr->max_rd_atomic);
rpcrdma_update_connect_private(xprt, &event->param.conn);
goto connected;
case RDMA_CM_EVENT_CONNECT_ERROR:
@@ -298,11 +290,9 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
connstate = -ENETDOWN;
goto connected;
case RDMA_CM_EVENT_REJECTED:
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- pr_info("rpcrdma: connection to %pIS:%u on %s rejected: %s\n",
- sap, rpc_get_port(sap), ia->ri_device->name,
+ dprintk("rpcrdma: connection to %pIS:%u rejected: %s\n",
+ sap, rpc_get_port(sap),
rdma_reject_msg(id, event->status));
-#endif
connstate = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
connstate = -EAGAIN;
@@ -310,37 +300,19 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DISCONNECTED:
connstate = -ECONNABORTED;
connected:
- dprintk("RPC: %s: %sconnected\n",
- __func__, connstate > 0 ? "" : "dis");
atomic_set(&xprt->rx_buf.rb_credits, 1);
ep->rep_connected = connstate;
rpcrdma_conn_func(ep);
wake_up_all(&ep->rep_connect_wait);
/*FALLTHROUGH*/
default:
- dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
- __func__, sap, rpc_get_port(sap), ep,
- rdma_event_msg(event->event));
+ dprintk("RPC: %s: %pIS:%u on %s/%s (ep 0x%p): %s\n",
+ __func__, sap, rpc_get_port(sap),
+ ia->ri_device->name, ia->ri_ops->ro_displayname,
+ ep, rdma_event_msg(event->event));
break;
}
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- if (connstate == 1) {
- int ird = attr->max_dest_rd_atomic;
- int tird = ep->rep_remote_cma.responder_resources;
-
- pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
- sap, rpc_get_port(sap),
- ia->ri_device->name,
- ia->ri_ops->ro_displayname,
- xprt->rx_buf.rb_max_requests,
- ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
- } else if (connstate < 0) {
- pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
- sap, rpc_get_port(sap), connstate);
- }
-#endif
-
return 0;
}
@@ -971,7 +943,6 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
if (req == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&req->rl_free);
spin_lock(&buffer->rb_reqslock);
list_add(&req->rl_all, &buffer->rb_allreqs);
spin_unlock(&buffer->rb_reqslock);
@@ -1033,6 +1004,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
spin_lock_init(&buf->rb_recovery_lock);
INIT_LIST_HEAD(&buf->rb_mws);
INIT_LIST_HEAD(&buf->rb_all);
+ INIT_LIST_HEAD(&buf->rb_pending);
INIT_LIST_HEAD(&buf->rb_stale_mrs);
INIT_DELAYED_WORK(&buf->rb_refresh_worker,
rpcrdma_mr_refresh_worker);
@@ -1055,7 +1027,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
goto out;
}
req->rl_backchannel = false;
- list_add(&req->rl_free, &buf->rb_send_bufs);
+ list_add(&req->rl_list, &buf->rb_send_bufs);
}
INIT_LIST_HEAD(&buf->rb_recv_bufs);
@@ -1084,8 +1056,8 @@ rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
struct rpcrdma_req *req;
req = list_first_entry(&buf->rb_send_bufs,
- struct rpcrdma_req, rl_free);
- list_del(&req->rl_free);
+ struct rpcrdma_req, rl_list);
+ list_del_init(&req->rl_list);
return req;
}
@@ -1187,6 +1159,7 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
if (!mw)
goto out_nomws;
+ mw->mw_flags = 0;
return mw;
out_nomws:
@@ -1267,7 +1240,7 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
spin_lock(&buffers->rb_lock);
buffers->rb_send_count--;
- list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
+ list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
if (rep) {
buffers->rb_recv_count--;
list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 1d66acf1a723..b282d3f8cdd8 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -271,6 +271,7 @@ struct rpcrdma_mw {
struct scatterlist *mw_sg;
int mw_nents;
enum dma_data_direction mw_dir;
+ unsigned long mw_flags;
union {
struct rpcrdma_fmr fmr;
struct rpcrdma_frmr frmr;
@@ -282,6 +283,11 @@ struct rpcrdma_mw {
struct list_head mw_all;
};
+/* mw_flags */
+enum {
+ RPCRDMA_MW_F_RI = 1,
+};
+
/*
* struct rpcrdma_req -- structure central to the request/reply sequence.
*
@@ -334,7 +340,8 @@ enum {
struct rpcrdma_buffer;
struct rpcrdma_req {
- struct list_head rl_free;
+ struct list_head rl_list;
+ __be32 rl_xid;
unsigned int rl_mapped_sges;
unsigned int rl_connect_cookie;
struct rpcrdma_buffer *rl_buffer;
@@ -396,6 +403,7 @@ struct rpcrdma_buffer {
int rb_send_count, rb_recv_count;
struct list_head rb_send_bufs;
struct list_head rb_recv_bufs;
+ struct list_head rb_pending;
u32 rb_max_requests;
atomic_t rb_credits; /* most recent credit grant */
@@ -461,7 +469,7 @@ struct rpcrdma_memreg_ops {
struct rpcrdma_mr_seg *, int, bool,
struct rpcrdma_mw **);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
- struct rpcrdma_req *);
+ struct list_head *);
void (*ro_unmap_safe)(struct rpcrdma_xprt *,
struct rpcrdma_req *, bool);
void (*ro_recover_mr)(struct rpcrdma_mw *);
@@ -544,6 +552,34 @@ void rpcrdma_destroy_req(struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
+static inline void
+rpcrdma_insert_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+ spin_lock(&buffers->rb_lock);
+ if (list_empty(&req->rl_list))
+ list_add_tail(&req->rl_list, &buffers->rb_pending);
+ spin_unlock(&buffers->rb_lock);
+}
+
+static inline struct rpcrdma_req *
+rpcrdma_lookup_req_locked(struct rpcrdma_buffer *buffers, __be32 xid)
+{
+ struct rpcrdma_req *pos;
+
+ list_for_each_entry(pos, &buffers->rb_pending, rl_list)
+ if (pos->rl_xid == xid)
+ return pos;
+ return NULL;
+}
+
+static inline void
+rpcrdma_remove_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+ spin_lock(&buffers->rb_lock);
+ list_del(&req->rl_list);
+ spin_unlock(&buffers->rb_lock);
+}
+
struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
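For illustration, a hypothetical caller of these new helpers (not taken
from this patch) could match an incoming reply to its pending request by
XID; rpcrdma_lookup_req_locked() expects rb_lock to be held, so the
lookup is wrapped in the lock:

    /* Hypothetical helper, for illustration only. */
    static struct rpcrdma_req *
    rpcrdma_match_reply(struct rpcrdma_buffer *buffers, __be32 xid)
    {
            struct rpcrdma_req *req;

            spin_lock(&buffers->rb_lock);
            req = rpcrdma_lookup_req_locked(buffers, xid);
            spin_unlock(&buffers->rb_lock);
            return req;
    }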
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index aa243db93f01..be0d4a5fdf53 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -75,8 +75,8 @@ static int __init example_init(void)
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
- "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
- i, sg[i].page_link, sg[i].offset, sg[i].length);
+ "page %p offset 0x%.8x length 0x%.8x\n",
+ i, sg_page(&sg[i]), sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
@@ -104,8 +104,8 @@ static int __init example_init(void)
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
- "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
- i, sg[i].page_link, sg[i].offset, sg[i].length);
+ "page %p offset 0x%.8x length 0x%.8x\n",
+ i, sg_page(&sg[i]), sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index c583a1e1bd3c..343d586e566e 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -23,15 +23,12 @@ subdirs := $(patsubst $(srcdir)/%/,%,\
$(filter-out $(srcdir)/,\
$(sort $(dir $(wildcard $(srcdir)/*/)))))
-# caller may set destination dir (when installing to asm/)
-_dst := $(if $(dst),$(dst),$(obj))
-
# Recursion
__headers: $(subdirs)
.PHONY: $(subdirs)
$(subdirs):
- $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
+ $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(dst)/$@
# Skip header install/check for include/uapi and arch/$(hdr-arch)/include/uapi.
# We have only sub-directories there.
@@ -39,21 +36,12 @@ skip-inst := $(if $(filter %/uapi,$(obj)),1)
ifeq ($(skip-inst),)
-# generated header directory
-gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
-
# Kbuild file is optional
kbuild-file := $(srctree)/$(obj)/Kbuild
-include $(kbuild-file)
-old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild
-ifneq ($(wildcard $(old-kbuild-file)),)
-include $(old-kbuild-file)
-endif
-
-installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst))
-
-gendir := $(objtree)/$(gen)
+installdir := $(INSTALL_HDR_PATH)/$(dst)
+gendir := $(objtree)/$(subst include/,include/generated/,$(obj))
header-files := $(notdir $(wildcard $(srcdir)/*.h))
header-files += $(notdir $(wildcard $(srcdir)/*.agh))
header-files := $(filter-out $(no-export-headers), $(header-files))
@@ -64,14 +52,8 @@ genhdr-files := $(filter-out $(header-files), $(genhdr-files))
install-file := $(installdir)/.install
check-file := $(installdir)/.check
-# generic-y list all files an architecture uses from asm-generic
-# Use this to build a list of headers which require a wrapper
-generic-files := $(notdir $(wildcard $(srctree)/include/uapi/asm-generic/*.h))
-wrapper-files := $(filter $(generic-files), $(generic-y))
-wrapper-files := $(filter-out $(header-files), $(wrapper-files))
-
# all headers files for this dir
-all-files := $(header-files) $(genhdr-files) $(wrapper-files)
+all-files := $(header-files) $(genhdr-files)
output-files := $(addprefix $(installdir)/, $(all-files))
ifneq ($(mandatory-y),)
@@ -95,9 +77,6 @@ quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\
cmd_install = \
$(CONFIG_SHELL) $< $(installdir) $(srcdir) $(header-files); \
$(CONFIG_SHELL) $< $(installdir) $(gendir) $(genhdr-files); \
- for F in $(wrapper-files); do \
- echo "\#include <asm-generic/$$F>" > $(installdir)/$$F; \
- done; \
touch $@
quiet_cmd_remove = REMOVE $(unwanted)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8f940c09918f..2287a0bca863 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5576,10 +5576,18 @@ sub process {
"architecture specific defines should be avoided\n" . $herecurr);
}
+# check that the storage class is not after a type
+ if ($line =~ /\b($Type)\s+($Storage)\b/) {
+ WARN("STORAGE_CLASS",
+ "storage class '$2' should be located before type '$1'\n" . $herecurr);
+ }
# Check that the storage class is at the beginning of a declaration
- if ($line =~ /\b$Storage\b/ && $line !~ /^.\s*$Storage\b/) {
+ if ($line =~ /\b$Storage\b/ &&
+ $line !~ /^.\s*$Storage/ &&
+ $line =~ /^.\s*(.+?)\$Storage\s/ &&
+ $1 !~ /[\,\)]\s*$/) {
WARN("STORAGE_CLASS",
- "storage class should be at the beginning of the declaration\n" . $herecurr)
+ "storage class should be at the beginning of the declaration\n" . $herecurr);
}
# check the location of the inline attribute, that it is between
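For illustration, the new test warns when the storage class trails the
type. A pair of hypothetical declarations showing both forms:

    int static bad_counter;         /* warns: 'static' after type 'int' */
    static int good_counter;        /* preferred: storage class first */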
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 7986f4e0da12..7aad82406422 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/of_fdt.h>
/* We need to stringify expanded macros so that they can be parsed */
@@ -50,3 +51,9 @@ LX_VALUE(MNT_NOEXEC)
LX_VALUE(MNT_NOATIME)
LX_VALUE(MNT_NODIRATIME)
LX_VALUE(MNT_RELATIME)
+
+/* <linux/of_fdt.h> */
+LX_VALUE(OF_DT_HEADER)
+
+/* Kernel Configs */
+LX_CONFIG(CONFIG_OF)
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index 5afd1098e33a..6d2e09a2ad2f 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -12,6 +12,7 @@
#
import gdb
+import sys
from linux import utils
@@ -24,7 +25,7 @@ class LxDmesg(gdb.Command):
def invoke(self, arg, from_tty):
log_buf_addr = int(str(gdb.parse_and_eval(
- "'printk.c'::log_buf")).split()[0], 16)
+ "(void *)'printk.c'::log_buf")).split()[0], 16)
log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
@@ -52,13 +53,19 @@ class LxDmesg(gdb.Command):
continue
text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
- text = log_buf[pos + 16:pos + 16 + text_len].decode()
+ text = log_buf[pos + 16:pos + 16 + text_len].decode(
+ encoding='utf8', errors='replace')
time_stamp = utils.read_u64(log_buf[pos:pos + 8])
for line in text.splitlines():
- gdb.write("[{time:12.6f}] {line}\n".format(
+ msg = u"[{time:12.6f}] {line}\n".format(
time=time_stamp / 1000000000.0,
- line=line))
+ line=line)
+            # With python2 gdb.write will attempt to convert unicode to
+            # ascii and might fail, so pass a utf8-encoded str instead.
+ if sys.hexversion < 0x03000000:
+ msg = msg.encode(encoding='utf8', errors='replace')
+ gdb.write(msg)
pos += length
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 38b1f09d1cd9..086d27223c0c 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -16,6 +16,7 @@ from linux import constants
from linux import utils
from linux import tasks
from linux import lists
+from struct import pack, unpack
class LxCmdLine(gdb.Command):
@@ -195,3 +196,75 @@ values of that process namespace"""
info_opts(MNT_INFO, m_flags)))
LxMounts()
+
+
+class LxFdtDump(gdb.Command):
+ """Output Flattened Device Tree header and dump FDT blob to the filename
+ specified as the command argument. Equivalent to
+ 'cat /proc/fdt > fdtdump.dtb' on a running target"""
+
+ def __init__(self):
+ super(LxFdtDump, self).__init__("lx-fdtdump", gdb.COMMAND_DATA,
+ gdb.COMPLETE_FILENAME)
+
+ def fdthdr_to_cpu(self, fdt_header):
+
+ fdt_header_be = ">IIIIIII"
+ fdt_header_le = "<IIIIIII"
+
+ if utils.get_target_endianness() == 1:
+ output_fmt = fdt_header_le
+ else:
+ output_fmt = fdt_header_be
+
+ return unpack(output_fmt, pack(fdt_header_be,
+ fdt_header['magic'],
+ fdt_header['totalsize'],
+ fdt_header['off_dt_struct'],
+ fdt_header['off_dt_strings'],
+ fdt_header['off_mem_rsvmap'],
+ fdt_header['version'],
+ fdt_header['last_comp_version']))
+
+ def invoke(self, arg, from_tty):
+
+ if not constants.LX_CONFIG_OF:
+ raise gdb.GdbError("Kernel not compiled with CONFIG_OF\n")
+
+ if len(arg) == 0:
+ filename = "fdtdump.dtb"
+ else:
+ filename = arg
+
+ py_fdt_header_ptr = gdb.parse_and_eval(
+ "(const struct fdt_header *) initial_boot_params")
+ py_fdt_header = py_fdt_header_ptr.dereference()
+
+ fdt_header = self.fdthdr_to_cpu(py_fdt_header)
+
+ if fdt_header[0] != constants.LX_OF_DT_HEADER:
+ raise gdb.GdbError("No flattened device tree magic found\n")
+
+ gdb.write("fdt_magic: 0x{:02X}\n".format(fdt_header[0]))
+ gdb.write("fdt_totalsize: 0x{:02X}\n".format(fdt_header[1]))
+ gdb.write("off_dt_struct: 0x{:02X}\n".format(fdt_header[2]))
+ gdb.write("off_dt_strings: 0x{:02X}\n".format(fdt_header[3]))
+ gdb.write("off_mem_rsvmap: 0x{:02X}\n".format(fdt_header[4]))
+ gdb.write("version: {}\n".format(fdt_header[5]))
+ gdb.write("last_comp_version: {}\n".format(fdt_header[6]))
+
+ inf = gdb.inferiors()[0]
+ fdt_buf = utils.read_memoryview(inf, py_fdt_header_ptr,
+ fdt_header[1]).tobytes()
+
+ try:
+ f = open(filename, 'wb')
+        except IOError:
+ raise gdb.GdbError("Could not open file to dump fdt")
+
+ f.write(fdt_buf)
+ f.close()
+
+ gdb.write("Dumped fdt blob to " + filename + "\n")
+
+LxFdtDump()
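As a usage note (assuming the gdb helper scripts are loaded against a
live target), running lx-fdtdump with no argument at the (gdb) prompt
should print the header fields above and write fdtdump.dtb to the
current directory; "lx-fdtdump /tmp/fdt.dtb" selects another output
file, and the resulting blob can be decompiled with dtc -I dtb.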
diff --git a/security/Kconfig b/security/Kconfig
index d540bfe73190..e8e449444e65 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -163,6 +163,13 @@ config HARDENED_USERCOPY_PAGESPAN
been removed. This config is intended to be used only while
trying to find such users.
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
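For illustration, a minimal sketch (not from this patch) of the bug
class FORTIFY_SOURCE targets:

    #include <string.h>

    /* The compiler can see both sizeof(dst) and the constant source
     * length, so a fortified strcpy() can flag this overflow at build
     * time; overflows whose sizes are only known at runtime are caught
     * by the fortify runtime checks instead. */
    static void fortify_demo(void)
    {
            char dst[8];

            strcpy(dst, "definitely longer than eight bytes");
    }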
diff --git a/security/keys/compat_dh.c b/security/keys/compat_dh.c
index a6a659b6bcb6..aa6b34cafe5f 100644
--- a/security/keys/compat_dh.c
+++ b/security/keys/compat_dh.c
@@ -33,6 +33,8 @@ long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
kdfcopy.hashname = compat_ptr(compat_kdfcopy.hashname);
kdfcopy.otherinfo = compat_ptr(compat_kdfcopy.otherinfo);
kdfcopy.otherinfolen = compat_kdfcopy.otherinfolen;
+ memcpy(kdfcopy.__spare, compat_kdfcopy.__spare,
+ sizeof(kdfcopy.__spare));
return __keyctl_dh_compute(params, buffer, buflen, &kdfcopy);
}
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 4755d4b4f945..d1ea9f325f94 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -266,6 +266,11 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
if (kdfcopy) {
char *hashname;
+ if (memchr_inv(kdfcopy->__spare, 0, sizeof(kdfcopy->__spare))) {
+ ret = -EINVAL;
+ goto out1;
+ }
+
if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
ret = -EMSGSIZE;
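The check added above follows the usual reserved-field idiom: userspace
must zero-fill spare fields so the kernel can assign them a meaning
later without breaking existing callers. A hypothetical distillation
(check_reserved() is illustrative, not kernel API):

    /* memchr_inv() returns a pointer to the first byte that differs
     * from the given value, or NULL when the whole region matches. */
    static int check_reserved(const void *spare, size_t len)
    {
            return memchr_inv(spare, 0, len) ? -EINVAL : 0;
    }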
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 91bc6214ae57..1c02c6547038 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -198,7 +198,7 @@ struct request_key_auth {
void *callout_info;
size_t callout_len;
pid_t pid;
-};
+} __randomize_layout;
extern struct key_type key_type_request_key_auth;
extern struct key *request_key_auth_new(struct key *target,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index b3d5bed75029..22995cb3bd44 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -238,10 +238,8 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
return false;
- /* check architectures that return -EINVAL from dma_mmap_coherent() */
- /* FIXME: this should be some global flag */
-#if defined(CONFIG_C6X) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) ||\
- defined(CONFIG_PARISC) || defined(CONFIG_XTENSA)
+ /* architecture supports dma_mmap_coherent()? */
+#if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
if (!substream->ops->mmap &&
substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
return false;
@@ -3502,7 +3500,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
- if (!substream->ops->page &&
+ if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
return dma_mmap_coherent(substream->dma_buffer.dev.dev,
area,
diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
index bc345d564f8d..db76a5bf2bd2 100644
--- a/sound/drivers/opl4/opl4_lib.c
+++ b/sound/drivers/opl4/opl4_lib.c
@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_DESCRIPTION("OPL4 driver");
MODULE_LICENSE("GPL");
-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
+static inline void snd_opl4_wait(struct snd_opl4 *opl4)
{
int timeout = 10;
while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index 912b5a9ccbab..013d8d1170fe 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -120,24 +120,24 @@ void snd_msndmidi_input_read(void *mpuv)
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+ u16 head, tail, size;
spin_lock_irqsave(&mpu->input_lock, flags);
- while (readw(mpu->dev->MIDQ + JQS_wTail) !=
- readw(mpu->dev->MIDQ + JQS_wHead)) {
- u16 wTmp, val;
- val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
-
- if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
- &mpu->mode))
- snd_rawmidi_receive(mpu->substream_input,
- (unsigned char *)&val, 1);
-
- wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
- if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
- writew(0, mpu->dev->MIDQ + JQS_wHead);
- else
- writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
+ head = readw(mpu->dev->MIDQ + JQS_wHead);
+ tail = readw(mpu->dev->MIDQ + JQS_wTail);
+ size = readw(mpu->dev->MIDQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ unsigned char val = readw(pwMIDQData + 2 * head);
+
+ if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
+ snd_rawmidi_receive(mpu->substream_input, &val, 1);
+ if (++head > size)
+ head = 0;
+ writew(head, mpu->dev->MIDQ + JQS_wHead);
}
+ out:
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
EXPORT_SYMBOL(snd_msndmidi_input_read);
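The rewrite above hardens the drain loop against corrupt queue indices
read from device-shared memory. A hypothetical distillation of the
pattern (consume_word() and the queue layout are illustrative):

    static void drain_shared_queue(void __iomem *q, void __iomem *data)
    {
            u16 head = readw(q + JQS_wHead);
            u16 tail = readw(q + JQS_wTail);
            u16 size = readw(q + JQS_wSize);

            /* Read the untrusted indices once, validate before use. */
            if (head > size || tail > size)
                    return;
            while (head != tail) {
                    consume_word(readw(data + 2 * head));
                    if (++head > size)
                            head = 0;
                    writew(head, q + JQS_wHead);
            }
    }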
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index ad4897337df5..fc4fb1904aef 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -170,23 +170,24 @@ static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
{
struct snd_msnd *chip = dev_id;
void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+ u16 head, tail, size;
/* Send ack to DSP */
/* inb(chip->io + HP_RXL); */
/* Evaluate queued DSP messages */
- while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
- u16 wTmp;
-
- snd_msnd_eval_dsp_msg(chip,
- readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
-
- wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
- if (wTmp > readw(chip->DSPQ + JQS_wSize))
- writew(0, chip->DSPQ + JQS_wHead);
- else
- writew(wTmp, chip->DSPQ + JQS_wHead);
+ head = readw(chip->DSPQ + JQS_wHead);
+ tail = readw(chip->DSPQ + JQS_wTail);
+ size = readw(chip->DSPQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
+ if (++head > size)
+ head = 0;
+ writew(head, chip->DSPQ + JQS_wHead);
}
+ out:
/* Send ack to DSP */
inb(chip->io + HP_RXL);
return IRQ_HANDLED;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 76c85f08bea6..d549f35f39d3 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -53,9 +53,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
#define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
+#define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
+ ((codec)->core.vendor_id == 0x80862800))
#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
|| is_skylake(codec) || is_broxton(codec) \
- || is_kabylake(codec))
+				|| is_kabylake(codec) || is_geminilake(codec))
#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
@@ -3790,6 +3792,7 @@ HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
+HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index cd6987b5c6d9..45d58fc1df39 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -379,6 +379,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0899:
case 0x10ec0900:
+ case 0x10ec1168:
case 0x10ec1220:
alc_update_coef_idx(codec, 0x7, 1<<1, 0);
break;
@@ -5179,6 +5180,7 @@ enum {
ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
ALC233_FIXUP_LENOVO_MULTI_CODECS,
+ ALC294_FIXUP_LENOVO_MIC_LOCATION,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5962,6 +5964,18 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_alc662_fixup_lenovo_dual_codecs,
},
+ [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+			/* Change the mic location from front to right, otherwise
+			 * there are two front mics with the same name and
+			 * pulseaudio can't handle them. This is just a temporary
+			 * workaround; after applying this fixup there will be one
+			 * "Front Mic" and one "Mic" on this machine.
+			 */
+ { 0x1a, 0x04a19040 },
+ { }
+ },
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6143,6 +6157,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -7801,6 +7816,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+ HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
{} /* terminator */
};
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 7e0405e1651d..412a7c82995a 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -120,7 +120,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, int strict_alignment,
const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz)
+ char *log_buf, size_t log_buf_sz, int log_level)
{
union bpf_attr attr;
@@ -131,7 +131,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.license = ptr_to_u64(license);
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_buf_sz;
- attr.log_level = 2;
+ attr.log_level = log_level;
log_buf[0] = 0;
attr.kern_version = kern_version;
attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 16de44a14b48..418c86e69bcb 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -38,7 +38,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, int strict_alignment,
const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz);
+ char *log_buf, size_t log_buf_sz, int log_level);
int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
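A hypothetical call site for the changed prototype (insns and insns_cnt
are assumed to be provided by the caller); log_level 1 requests the
normal verifier log, while 2 is the verbose per-instruction dump this
function previously hard-coded:

    char vlog[65536];
    int fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
                                insns_cnt, 0 /* strict_alignment */,
                                "GPL", 0 /* kern_version */,
                                vlog, sizeof(vlog), 1 /* log_level */);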
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 0a8a1c45af87..a1497c516d85 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -643,7 +643,7 @@ static const struct {
{ "__GFP_FS", "F" },
{ "__GFP_COLD", "CO" },
{ "__GFP_NOWARN", "NWR" },
- { "__GFP_REPEAT", "R" },
+ { "__GFP_RETRY_MAYFAIL", "R" },
{ "__GFP_NOFAIL", "NF" },
{ "__GFP_NORETRY", "NR" },
{ "__GFP_COMP", "C" },
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index bccebd935907..29793694cbc7 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -380,7 +380,7 @@ static int do_test_single(struct bpf_align_test *test)
prog_len = probe_filter_length(prog);
fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len, 1, "GPL", 0,
- bpf_vlog, sizeof(bpf_vlog));
+ bpf_vlog, sizeof(bpf_vlog), 2);
if (fd_prog < 0) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 404aec520812..af7d173910f4 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4969,7 +4969,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val), 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -4995,7 +4995,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) + 1, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5023,7 +5023,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) - 20, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5050,7 +5050,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) - 19, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5510,6 +5510,476 @@ static struct bpf_test tests[] = {
.errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
+ {
+ "bounds checks mixing signed and unsigned, positive bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R8 invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R8 invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 invalid mem access",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_6, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R4 min value is negative, either use unsigned",
+ .errstr = "R4 min value is negative, either use unsigned",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 7",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 8",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 9",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 10",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 11",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 12",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ /* Dead branch. */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 13",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 14",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 15",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map1 = { 4 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "bounds checks mixing signed and unsigned, variant 16",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -5633,7 +6103,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
- "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
+ "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
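The new tests all probe the same pitfall: a value can satisfy an
unsigned upper-bound branch and a signed lower-bound branch at the same
time while still being an enormous unsigned offset. A standalone C
illustration (not part of the selftests):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long v = 0x8000000000000000ULL;

            /* Unsigned guard (think BPF_JGT against -1): never taken,
             * since nothing exceeds ULLONG_MAX, so we fall through. */
            int past_unsigned_guard = !(v > 0xffffffffffffffffULL);

            /* The signed guard (think BPF_JSGT against 1) also passes,
             * because interpreted as signed, v is hugely negative. */
            int past_signed_guard = ((long long)v <= 1);

            printf("%d %d\n", past_unsigned_guard, past_signed_guard);
            return 0;
    }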
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
new file mode 100644
index 000000000000..b9302cc82c12
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
@@ -0,0 +1,36 @@
+#!/bin/sh
+# description: Kprobe event auto/manual naming
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+disable_events
+echo > kprobe_events
+
+:;: "Add an event on function without name" ;:
+
+FUNC=`grep " [tT] .*vfs_read$" /proc/kallsyms | tail -n 1 | cut -f 3 -d " "`
+[ "x" != "x$FUNC" ] || exit_unresolved
+echo "p $FUNC" > kprobe_events
+PROBE_NAME=`echo $FUNC | tr ".:" "_"`
+test -d events/kprobes/p_${PROBE_NAME}_0 || exit_failure
+
+:;: "Add an event on function with new name" ;:
+
+echo "p:event1 $FUNC" > kprobe_events
+test -d events/kprobes/event1 || exit_failure
+
+:;: "Add an event on function with new name and group" ;:
+
+echo "p:kprobes2/event2 $FUNC" > kprobe_events
+test -d events/kprobes2/event2 || exit_failure
+
+:;: "Add an event on dot function without name" ;:
+
+FUNC=`grep -m 10 " [tT] .*\.isra\..*$" /proc/kallsyms | tail -n 1 | cut -f 3 -d " "`
+[ "x" != "x$FUNC" ] || exit_unresolved
+echo "p $FUNC" > kprobe_events
+EVENT=`grep $FUNC kprobe_events | cut -f 1 -d " " | cut -f 2 -d:`
+[ "x" != "x$EVENT" ] || exit_failure
+test -d events/$EVENT || exit_failure
+
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc
new file mode 100644
index 000000000000..6d634e4b7680
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Kprobe dynamic event - probing module
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+disable_events
+echo > kprobe_events
+
+:;: "Add an event on a module function without specifying event name" ;:
+
+MOD=`lsmod | head -n 2 | tail -n 1 | cut -f1 -d" "`
+FUNC=`grep -m 1 ".* t .*\\[$MOD\\]" /proc/kallsyms | xargs | cut -f3 -d" "`
+[ "x" != "x$MOD" -a "y" != "y$FUNC" ] || exit_unresolved
+echo "p $MOD:$FUNC" > kprobe_events
+PROBE_NAME=`echo $MOD:$FUNC | tr ".:" "_"`
+test -d events/kprobes/p_${PROBE_NAME}_0 || exit_failure
+
+:;: "Add an event on a module function with new event name" ;:
+
+echo "p:event1 $MOD:$FUNC" > kprobe_events
+test -d events/kprobes/event1 || exit_failure
+
+:;: "Add an event on a module function with new event and group name" ;:
+
+echo "p:kprobes1/event1 $MOD:$FUNC" > kprobe_events
+test -d events/kprobes1/event1 || exit_failure
+
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
index f4d1ff785d67..2a1cb9908746 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -2,10 +2,10 @@
# description: Register/unregister many kprobe events
# ftrace fentry skip size depends on the machine architecture.
-# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc
+# Currently HAVE_KPROBES_ON_FTRACE is defined on x86 and powerpc64le
case `uname -m` in
x86_64|i[3456]86) OFFS=5;;
- ppc*) OFFS=4;;
+ ppc64le) OFFS=8;;
*) OFFS=0;;
esac
diff --git a/tools/testing/selftests/kmod/Makefile b/tools/testing/selftests/kmod/Makefile
new file mode 100644
index 000000000000..fa2ccc5fb3de
--- /dev/null
+++ b/tools/testing/selftests/kmod/Makefile
@@ -0,0 +1,11 @@
+# Makefile for kmod loading selftests
+
+# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+all:
+
+TEST_PROGS := kmod.sh
+
+include ../lib.mk
+
+# Nothing to clean up.
+clean:
diff --git a/tools/testing/selftests/kmod/config b/tools/testing/selftests/kmod/config
new file mode 100644
index 000000000000..259f4fd6b5e2
--- /dev/null
+++ b/tools/testing/selftests/kmod/config
@@ -0,0 +1,7 @@
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_LKM=m
+CONFIG_XFS_FS=m
+
+# The following are used with the force_init_test module parameter
+CONFIG_TUN=m
+CONFIG_BTRFS_FS=m
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
new file mode 100644
index 000000000000..8cecae9a8bca
--- /dev/null
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -0,0 +1,615 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or at your option any
+# later version; or, when distributed separately from the Linux kernel or
+# when incorporated into other software packages, subject to the following
+# license:
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of copyleft-next (version 0.3.1 or later) as published
+# at http://copyleft-next.org/.
+
+# This is a stress test script for kmod, the kernel module loader. It uses
+# test_kmod, which exposes a series of knobs for the API so we can
+# tweak each test in userspace rather than in kernelspace.
+#
+# The way kmod works is it uses the kernel's usermode helper API to eventually
+# call /sbin/modprobe. It limits the number of concurrent calls
+# possible. The kernel interface to load modules is request_module(), while
+# mount uses get_fs_type(). Both behave slightly differently, but the
+# differences are important enough to test each call separately. For this
+# reason test_kmod starts by providing tests for both calls.
+#
+# The test driver test_kmod assumes a series of defaults which you can
+# override by exporting to your environment prior running this script.
+# For instance this script assumes you do not have xfs loaded upon boot.
+# If that is not the case, export DEFAULT_KMOD_FS to the name of some
+# other filesystem module you do not have loaded upon bootup, e.g.
+# DEFAULT_KMOD_FS="ext4". Refer to allow_user_defaults() for the full
+# list of user-overridable variables.
+#
+# You'll want at least 4 GiB of RAM to run these tests without
+# running out of memory. For other requirements refer to test_reqs().
+
+set -e
+
+TEST_NAME="kmod"
+TEST_DRIVER="test_${TEST_NAME}"
+TEST_DIR=$(dirname $0)
+
+# This represents
+#
+# TEST_ID:TEST_COUNT:ENABLED
+#
+# TEST_ID: is the test id number
+# TEST_COUNT: number of times we should run the test
+# ENABLED: 1 if enabled, 0 otherwise
+#
+# Once these are enabled please leave them as-is. Write your own test,
+# we have tons of space.
+ALL_TESTS="0001:3:1"
+ALL_TESTS="$ALL_TESTS 0002:3:1"
+ALL_TESTS="$ALL_TESTS 0003:1:1"
+ALL_TESTS="$ALL_TESTS 0004:1:1"
+ALL_TESTS="$ALL_TESTS 0005:10:1"
+ALL_TESTS="$ALL_TESTS 0006:10:1"
+ALL_TESTS="$ALL_TESTS 0007:5:1"
+ALL_TESTS="$ALL_TESTS 0008:150:1"
+ALL_TESTS="$ALL_TESTS 0009:150:1"
+
+test_modprobe()
+{
+ if [ ! -d $DIR ]; then
+ echo "$0: $DIR not present" >&2
+ echo "You must have the following enabled in your kernel:" >&2
+ cat $TEST_DIR/config >&2
+ exit 1
+ fi
+}
+
+function allow_user_defaults()
+{
+ if [ -z $DEFAULT_KMOD_DRIVER ]; then
+ DEFAULT_KMOD_DRIVER="test_module"
+ fi
+
+ if [ -z $DEFAULT_KMOD_FS ]; then
+ DEFAULT_KMOD_FS="xfs"
+ fi
+
+ if [ -z $PROC_DIR ]; then
+ PROC_DIR="/proc/sys/kernel/"
+ fi
+
+ if [ -z $MODPROBE_LIMIT ]; then
+ MODPROBE_LIMIT=50
+ fi
+
+ if [ -z $DIR ]; then
+ DIR="/sys/devices/virtual/misc/${TEST_DRIVER}0/"
+ fi
+
+ if [ -z $DEFAULT_NUM_TESTS ]; then
+ DEFAULT_NUM_TESTS=150
+ fi
+
+ MODPROBE_LIMIT_FILE="${PROC_DIR}/kmod-limit"
+}
+
+test_reqs()
+{
+ if ! which modprobe 2> /dev/null > /dev/null; then
+ echo "$0: You need modprobe installed" >&2
+ exit 1
+ fi
+
+ if ! which kmod 2> /dev/null > /dev/null; then
+ echo "$0: You need kmod installed" >&2
+ exit 1
+ fi
+
+ # kmod 19 has a bad bug where it returns 0 when modprobe
+ # gets called *even* if the module was not loaded due to
+ # some bad heuristics. For details see:
+ #
+	# A workaround is possible in-kernel but it's rather
+	# complex.
+ KMOD_VERSION=$(kmod --version | awk '{print $3}')
+ if [[ $KMOD_VERSION -le 19 ]]; then
+ echo "$0: You need at least kmod 20" >&2
+ echo "kmod <= 19 is buggy, for details see:" >&2
+ echo "http://git.kernel.org/cgit/utils/kernel/kmod/kmod.git/commit/libkmod/libkmod-module.c?id=fd44a98ae2eb5eb32161088954ab21e58e19dfc4" >&2
+ exit 1
+ fi
+
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo $msg must be run as root >&2
+ exit 0
+ fi
+}
+
+function load_req_mod()
+{
+ trap "test_modprobe" EXIT
+
+ if [ ! -d $DIR ]; then
+ # Alanis: "Oh isn't it ironic?"
+ modprobe $TEST_DRIVER
+ fi
+}
+
+test_finish()
+{
+ echo "Test completed"
+}
+
+errno_name_to_val()
+{
+ case "$1" in
+	# kmod calls modprobe and, when a module is not found,
+ # modprobe returns just 1... However in the kernel we
+ # *sometimes* see 256...
+ MODULE_NOT_FOUND)
+ echo 256;;
+ SUCCESS)
+ echo 0;;
+ -EPERM)
+ echo -1;;
+ -ENOENT)
+ echo -2;;
+ -EINVAL)
+ echo -22;;
+ -ERR_ANY)
+ echo -123456;;
+ *)
+ echo invalid;;
+ esac
+}
+
+errno_val_to_name()
+{
+ case "$1" in
+ 256)
+ echo MODULE_NOT_FOUND;;
+ 0)
+ echo SUCCESS;;
+ -1)
+ echo -EPERM;;
+ -2)
+ echo -ENOENT;;
+ -22)
+ echo -EINVAL;;
+ -123456)
+ echo -ERR_ANY;;
+ *)
+ echo invalid;;
+	esac
+}
+
+config_set_test_case_driver()
+{
+ if ! echo -n 1 >$DIR/config_test_case; then
+		echo "$0: Unable to set test case to driver" >&2
+ exit 1
+ fi
+}
+
+config_set_test_case_fs()
+{
+ if ! echo -n 2 >$DIR/config_test_case; then
+		echo "$0: Unable to set test case to fs" >&2
+ exit 1
+ fi
+}
+
+config_num_threads()
+{
+ if ! echo -n $1 >$DIR/config_num_threads; then
+		echo "$0: Unable to set number of threads" >&2
+ exit 1
+ fi
+}
+
+config_get_modprobe_limit()
+{
+ if [[ -f ${MODPROBE_LIMIT_FILE} ]] ; then
+ MODPROBE_LIMIT=$(cat $MODPROBE_LIMIT_FILE)
+ fi
+ echo $MODPROBE_LIMIT
+}
+
+config_num_thread_limit_extra()
+{
+ MODPROBE_LIMIT=$(config_get_modprobe_limit)
+ let EXTRA_LIMIT=$MODPROBE_LIMIT+$1
+ config_num_threads $EXTRA_LIMIT
+}
+
+# For special characters use printf directly,
+# refer to kmod_test_0001
+config_set_driver()
+{
+ if ! echo -n $1 >$DIR/config_test_driver; then
+ echo "$0: Unable to set driver" >&2
+ exit 1
+ fi
+}
+
+config_set_fs()
+{
+ if ! echo -n $1 >$DIR/config_test_fs; then
+		echo "$0: Unable to set fs" >&2
+ exit 1
+ fi
+}
+
+config_get_driver()
+{
+ cat $DIR/config_test_driver
+}
+
+config_get_test_result()
+{
+ cat $DIR/test_result
+}
+
+config_reset()
+{
+ if ! echo -n "1" >"$DIR"/reset; then
+		echo "$0: reset should have worked" >&2
+ exit 1
+ fi
+}
+
+config_show_config()
+{
+ echo "----------------------------------------------------"
+ cat "$DIR"/config
+ echo "----------------------------------------------------"
+}
+
+config_trigger()
+{
+ if ! echo -n "1" >"$DIR"/trigger_config 2>/dev/null; then
+ echo "$1: FAIL - loading should have worked"
+ config_show_config
+ exit 1
+ fi
+ echo "$1: OK! - loading kmod test"
+}
+
+config_trigger_want_fail()
+{
+ if echo "1" > $DIR/trigger_config 2>/dev/null; then
+ echo "$1: FAIL - test case was expected to fail"
+ config_show_config
+ exit 1
+ fi
+ echo "$1: OK! - kmod test case failed as expected"
+}
+
+config_expect_result()
+{
+ RC=$(config_get_test_result)
+ RC_NAME=$(errno_val_to_name $RC)
+
+ ERRNO_NAME=$2
+ ERRNO=$(errno_name_to_val $ERRNO_NAME)
+
+ if [[ $ERRNO_NAME = "-ERR_ANY" ]]; then
+ if [[ $RC -ge 0 ]]; then
+ echo "$1: FAIL, test expects $ERRNO_NAME - got $RC_NAME ($RC)" >&2
+ config_show_config
+ exit 1
+ fi
+ elif [[ $RC != $ERRNO ]]; then
+ echo "$1: FAIL, test expects $ERRNO_NAME ($ERRNO) - got $RC_NAME ($RC)" >&2
+ config_show_config
+ exit 1
+ fi
+ echo "$1: OK! - Return value: $RC ($RC_NAME), expected $ERRNO_NAME"
+}
+
+kmod_defaults_driver()
+{
+ config_reset
+ modprobe -r $DEFAULT_KMOD_DRIVER
+ config_set_driver $DEFAULT_KMOD_DRIVER
+}
+
+kmod_defaults_fs()
+{
+ config_reset
+ modprobe -r $DEFAULT_KMOD_FS
+ config_set_fs $DEFAULT_KMOD_FS
+ config_set_test_case_fs
+}
+
+kmod_test_0001_driver()
+{
+ NAME='\000'
+
+ kmod_defaults_driver
+ config_num_threads 1
+ printf '\000' >"$DIR"/config_test_driver
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} MODULE_NOT_FOUND
+}
+
+kmod_test_0001_fs()
+{
+ NAME='\000'
+
+ kmod_defaults_fs
+ config_num_threads 1
+ printf '\000' >"$DIR"/config_test_fs
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} -EINVAL
+}
+
+kmod_test_0001()
+{
+ kmod_test_0001_driver
+ kmod_test_0001_fs
+}
+
+kmod_test_0002_driver()
+{
+ NAME="nope-$DEFAULT_KMOD_DRIVER"
+
+ kmod_defaults_driver
+ config_set_driver $NAME
+ config_num_threads 1
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} MODULE_NOT_FOUND
+}
+
+kmod_test_0002_fs()
+{
+ NAME="nope-$DEFAULT_KMOD_FS"
+
+ kmod_defaults_fs
+ config_set_fs $NAME
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} -EINVAL
+}
+
+kmod_test_0002()
+{
+ kmod_test_0002_driver
+ kmod_test_0002_fs
+}
+
+kmod_test_0003()
+{
+ kmod_defaults_fs
+ config_num_threads 1
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} SUCCESS
+}
+
+kmod_test_0004()
+{
+ kmod_defaults_fs
+ config_num_threads 2
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} SUCCESS
+}
+
+kmod_test_0005()
+{
+ kmod_defaults_driver
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} SUCCESS
+}
+
+kmod_test_0006()
+{
+ kmod_defaults_fs
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} SUCCESS
+}
+
+kmod_test_0007()
+{
+ kmod_test_0005
+ kmod_test_0006
+}
+
+kmod_test_0008()
+{
+ kmod_defaults_driver
+ MODPROBE_LIMIT=$(config_get_modprobe_limit)
+ let EXTRA=$MODPROBE_LIMIT/6
+ config_num_thread_limit_extra $EXTRA
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} SUCCESS
+}
+
+kmod_test_0009()
+{
+ kmod_defaults_fs
+ MODPROBE_LIMIT=$(config_get_modprobe_limit)
+ let EXTRA=$MODPROBE_LIMIT/4
+ config_num_thread_limit_extra $EXTRA
+ config_trigger ${FUNCNAME[0]}
+ config_expect_result ${FUNCNAME[0]} SUCCESS
+}
+
+list_tests()
+{
+ echo "Test ID list:"
+ echo
+	echo "TEST_ID x NUM_TESTS"
+ echo "TEST_ID: Test ID"
+ echo "NUM_TESTS: Number of recommended times to run the test"
+ echo
+ echo "0001 x $(get_test_count 0001) - Simple test - 1 thread for empty string"
+ echo "0002 x $(get_test_count 0002) - Simple test - 1 thread for modules/filesystems that do not exist"
+ echo "0003 x $(get_test_count 0003) - Simple test - 1 thread for get_fs_type() only"
+ echo "0004 x $(get_test_count 0004) - Simple test - 2 threads for get_fs_type() only"
+ echo "0005 x $(get_test_count 0005) - multithreaded tests with default setup - request_module() only"
+ echo "0006 x $(get_test_count 0006) - multithreaded tests with default setup - get_fs_type() only"
+ echo "0007 x $(get_test_count 0007) - multithreaded tests with default setup test request_module() and get_fs_type()"
+ echo "0008 x $(get_test_count 0008) - multithreaded - push kmod_concurrent over max_modprobes for request_module()"
+ echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
+}
+
+usage()
+{
+ NUM_TESTS=$(grep -o ' ' <<<"$ALL_TESTS" | grep -c .)
+ let NUM_TESTS=$NUM_TESTS+1
+ MAX_TEST=$(printf "%04d\n" $NUM_TESTS)
+ echo "Usage: $0 [ -t <4-number-digit> ] | [ -w <4-number-digit> ] |"
+ echo " [ -s <4-number-digit> ] | [ -c <4-number-digit> <test- count>"
+ echo " [ all ] [ -h | --help ] [ -l ]"
+ echo ""
+ echo "Valid tests: 0001-$MAX_TEST"
+ echo ""
+ echo " all Runs all tests (default)"
+ echo " -t Run test ID the number amount of times is recommended"
+ echo " -w Watch test ID run until it runs into an error"
+ echo " -c Run test ID once"
+ echo " -s Run test ID x test-count number of times"
+ echo " -l List all test ID list"
+ echo " -h|--help Help"
+ echo
+ echo "If an error every occurs execution will immediately terminate."
+ echo "If you are adding a new test try using -w <test-ID> first to"
+ echo "make sure the test passes a series of tests."
+ echo
+ echo Example uses:
+ echo
+ echo "${TEST_NAME}.sh -- executes all tests"
+ echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recomended"
+ echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs"
+ echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once"
+ echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times"
+ echo
+ list_tests
+ exit 1
+}
+
+function test_num()
+{
+ re='^[0-9]+$'
+ if ! [[ $1 =~ $re ]]; then
+ usage
+ fi
+}
+
+function get_test_count()
+{
+ test_num $1
+ TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ LAST_TWO=${TEST_DATA#*:*}
+ echo ${LAST_TWO%:*}
+}
+
+function get_test_enabled()
+{
+ test_num $1
+ TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ echo ${TEST_DATA#*:*:}
+}
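
Each $ALL_TESTS entry packs three colon-separated fields, and the helpers above pull them apart with nothing but shell parameter expansion. A standalone illustration, using a made-up entry (the real counts live in the ALL_TESTS definition earlier in the script):

    spec="0008:150:1"          # TEST_ID:TEST_COUNT:ENABLED
    echo "${spec%:*:*}"        # 0008  - TEST_ID, as extracted by run_all_tests()
    last_two="${spec#*:*}"     # 150:1 - shortest-match strip of the "0008:" prefix
    echo "${last_two%:*}"      # 150   - TEST_COUNT, what get_test_count() returns
    echo "${spec#*:*:}"        # 1     - ENABLED, what get_test_enabled() returns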
+
+function run_all_tests()
+{
+ for i in $ALL_TESTS ; do
+ TEST_ID=${i%:*:*}
+ ENABLED=$(get_test_enabled $TEST_ID)
+ TEST_COUNT=$(get_test_count $TEST_ID)
+ if [[ $ENABLED -eq "1" ]]; then
+ test_case $TEST_ID $TEST_COUNT
+ fi
+ done
+}
+
+function watch_log()
+{
+ if [ $# -ne 3 ]; then
+ clear
+ fi
+ date
+ echo "Running test: $2 - run #$1"
+}
+
+function watch_case()
+{
+ i=0
+ while [ 1 ]; do
+
+ if [ $# -eq 1 ]; then
+ test_num $1
+ watch_log $i ${TEST_NAME}_test_$1
+ ${TEST_NAME}_test_$1
+ else
+ watch_log $i all
+ run_all_tests
+ fi
+ let i=$i+1
+ done
+}
+
+function test_case()
+{
+ NUM_TESTS=$DEFAULT_NUM_TESTS
+ if [ $# -eq 2 ]; then
+ NUM_TESTS=$2
+ fi
+
+ i=0
+ while [ $i -lt $NUM_TESTS ]; do
+ test_num $1
+ watch_log $i ${TEST_NAME}_test_$1 noclear
+ RUN_TEST=${TEST_NAME}_test_$1
+ $RUN_TEST
+ let i=$i+1
+ done
+}
+
+function parse_args()
+{
+ if [ $# -eq 0 ]; then
+ run_all_tests
+ else
+ if [[ "$1" = "all" ]]; then
+ run_all_tests
+ elif [[ "$1" = "-w" ]]; then
+ shift
+ watch_case $@
+ elif [[ "$1" = "-t" ]]; then
+ shift
+ test_num $1
+ test_case $1 $(get_test_count $1)
+ elif [[ "$1" = "-c" ]]; then
+ shift
+ test_num $1
+ test_num $2
+ test_case $1 $2
+ elif [[ "$1" = "-s" ]]; then
+ shift
+ test_case $1 1
+ elif [[ "$1" = "-l" ]]; then
+ list_tests
+ elif [[ "$1" = "-h" || "$1" = "--help" ]]; then
+ usage
+ else
+ usage
+ fi
+ fi
+}
+
+test_reqs
+allow_user_defaults
+load_req_mod
+
+trap "test_finish" EXIT
+
+parse_args "$@"
+
+exit 0
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 13f5198ba0ee..1c12b5855e4f 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -18,6 +18,7 @@ LIST_DEVS=FALSE
DEBUGFS=${DEBUGFS-/sys/kernel/debug}
+DB_BITMASK=0x7FFF
PERF_RUN_ORDER=32
MAX_MW_SIZE=0
RUN_DMA_TESTS=
@@ -38,6 +39,7 @@ function show_help()
echo "be highly recommended."
echo
echo "Options:"
+ echo " -b BITMASK doorbell clear bitmask for ntb_tool"
echo " -C don't cleanup ntb modules on exit"
echo " -d run dma tests"
echo " -h show this help message"
@@ -52,8 +54,9 @@ function show_help()
function parse_args()
{
OPTIND=0
- while getopts "Cdhlm:r:p:w:" opt; do
+ while getopts "b:Cdhlm:r:p:w:" opt; do
case "$opt" in
+ b) DB_BITMASK=${OPTARG} ;;
C) DONT_CLEANUP=1 ;;
d) RUN_DMA_TESTS=1 ;;
h) show_help; exit 0 ;;
@@ -85,6 +88,10 @@ set -e
function _modprobe()
{
modprobe "$@"
+
+ if [[ "$REMOTE_HOST" != "" ]]; then
+ ssh "$REMOTE_HOST" modprobe "$@"
+ fi
}
function split_remote()
@@ -154,7 +161,7 @@ function doorbell_test()
echo "Running db tests on: $(basename $LOC) / $(basename $REM)"
- write_file "c 0xFFFFFFFF" "$REM/db"
+ write_file "c $DB_BITMASK" "$REM/db"
for ((i=1; i <= 8; i++)); do
let DB=$(read_file "$REM/db") || true
diff --git a/tools/testing/selftests/sysctl/Makefile b/tools/testing/selftests/sysctl/Makefile
index b3c33e071f10..95c320b354e8 100644
--- a/tools/testing/selftests/sysctl/Makefile
+++ b/tools/testing/selftests/sysctl/Makefile
@@ -4,8 +4,7 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests".
all:
-TEST_PROGS := run_numerictests run_stringtests
-TEST_FILES := common_tests
+TEST_PROGS := sysctl.sh
include ../lib.mk
diff --git a/tools/testing/selftests/sysctl/common_tests b/tools/testing/selftests/sysctl/common_tests
deleted file mode 100644
index b6862322962f..000000000000
--- a/tools/testing/selftests/sysctl/common_tests
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/bin/sh
-
-TEST_FILE=$(mktemp)
-
-echo "== Testing sysctl behavior against ${TARGET} =="
-
-set_orig()
-{
- echo "${ORIG}" > "${TARGET}"
-}
-
-set_test()
-{
- echo "${TEST_STR}" > "${TARGET}"
-}
-
-verify()
-{
- local seen
- seen=$(cat "$1")
- if [ "${seen}" != "${TEST_STR}" ]; then
- return 1
- fi
- return 0
-}
-
-exit_test()
-{
- if [ ! -z ${old_strict} ]; then
- echo ${old_strict} > ${WRITES_STRICT}
- fi
- exit $rc
-}
-
-trap 'set_orig; rm -f "${TEST_FILE}"' EXIT
-
-rc=0
-
-echo -n "Writing test file ... "
-echo "${TEST_STR}" > "${TEST_FILE}"
-if ! verify "${TEST_FILE}"; then
- echo "FAIL" >&2
- exit 1
-else
- echo "ok"
-fi
-
-echo -n "Checking sysctl is not set to test value ... "
-if verify "${TARGET}"; then
- echo "FAIL" >&2
- exit 1
-else
- echo "ok"
-fi
-
-echo -n "Writing sysctl from shell ... "
-set_test
-if ! verify "${TARGET}"; then
- echo "FAIL" >&2
- exit 1
-else
- echo "ok"
-fi
-
-echo -n "Resetting sysctl to original value ... "
-set_orig
-if verify "${TARGET}"; then
- echo "FAIL" >&2
- exit 1
-else
- echo "ok"
-fi
-
-echo -n "Checking write strict setting ... "
-WRITES_STRICT="${SYSCTL}/kernel/sysctl_writes_strict"
-if [ ! -e ${WRITES_STRICT} ]; then
- echo "FAIL, but skip in case of old kernel" >&2
-else
- old_strict=$(cat ${WRITES_STRICT})
- if [ "$old_strict" = "1" ]; then
- echo "ok"
- else
- echo "FAIL, strict value is 0 but force to 1 to continue" >&2
- echo "1" > ${WRITES_STRICT}
- fi
-fi
-
-# Now that we've validated the sanity of "set_test" and "set_orig",
-# we can use those functions to set starting states before running
-# specific behavioral tests.
-
-echo -n "Writing entire sysctl in single write ... "
-set_orig
-dd if="${TEST_FILE}" of="${TARGET}" bs=4096 2>/dev/null
-if ! verify "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Writing middle of sysctl after synchronized seek ... "
-set_test
-dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 skip=1 2>/dev/null
-if ! verify "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Writing beyond end of sysctl ... "
-set_orig
-dd if="${TEST_FILE}" of="${TARGET}" bs=20 seek=2 2>/dev/null
-if verify "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Writing sysctl with multiple long writes ... "
-set_orig
-(perl -e 'print "A" x 50;'; echo "${TEST_STR}") | \
- dd of="${TARGET}" bs=50 2>/dev/null
-if verify "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
diff --git a/tools/testing/selftests/sysctl/config b/tools/testing/selftests/sysctl/config
new file mode 100644
index 000000000000..6ca14800d755
--- /dev/null
+++ b/tools/testing/selftests/sysctl/config
@@ -0,0 +1 @@
+CONFIG_TEST_SYSCTL=y
diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests
deleted file mode 100755
index e6e76c93d948..000000000000
--- a/tools/testing/selftests/sysctl/run_numerictests
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-SYSCTL="/proc/sys"
-TARGET="${SYSCTL}/vm/swappiness"
-ORIG=$(cat "${TARGET}")
-TEST_STR=$(( $ORIG + 1 ))
-
-. ./common_tests
-
-exit_test
diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests
deleted file mode 100755
index 857ec667fb02..000000000000
--- a/tools/testing/selftests/sysctl/run_stringtests
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/sh
-
-SYSCTL="/proc/sys"
-TARGET="${SYSCTL}/kernel/domainname"
-ORIG=$(cat "${TARGET}")
-TEST_STR="Testing sysctl"
-
-. ./common_tests
-
-# Only string sysctls support seeking/appending.
-MAXLEN=65
-
-echo -n "Writing entire sysctl in short writes ... "
-set_orig
-dd if="${TEST_FILE}" of="${TARGET}" bs=1 2>/dev/null
-if ! verify "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Writing middle of sysctl after unsynchronized seek ... "
-set_test
-dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 2>/dev/null
-if verify "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Checking sysctl maxlen is at least $MAXLEN ... "
-set_orig
-perl -e 'print "A" x ('"${MAXLEN}"'-2), "B";' | \
- dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null
-if ! grep -q B "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Checking sysctl keeps original string on overflow append ... "
-set_orig
-perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \
- dd of="${TARGET}" bs=$(( MAXLEN - 1 )) 2>/dev/null
-if grep -q B "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Checking sysctl stays NULL terminated on write ... "
-set_orig
-perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \
- dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null
-if grep -q B "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-echo -n "Checking sysctl stays NULL terminated on overwrite ... "
-set_orig
-perl -e 'print "A" x ('"${MAXLEN}"'-1), "BB";' | \
- dd of="${TARGET}" bs=$(( $MAXLEN + 1 )) 2>/dev/null
-if grep -q B "${TARGET}"; then
- echo "FAIL" >&2
- rc=1
-else
- echo "ok"
-fi
-
-exit_test
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
new file mode 100644
index 000000000000..ec232c3cfcaa
--- /dev/null
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -0,0 +1,774 @@
+#!/bin/bash
+# Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or at your option any
+# later version; or, when distributed separately from the Linux kernel or
+# when incorporated into other software packages, subject to the following
+# license:
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of copyleft-next (version 0.3.1 or later) as published
+# at http://copyleft-next.org/.
+
+# This performs a series of tests against the proc sysctl interface.
+
+TEST_NAME="sysctl"
+TEST_DRIVER="test_${TEST_NAME}"
+TEST_DIR=$(dirname $0)
+TEST_FILE=$(mktemp)
+
+# This represents
+#
+# TEST_ID:TEST_COUNT:ENABLED
+#
+# TEST_ID: is the test id number
+# TEST_COUNT: number of times we should run the test
+# ENABLED: 1 if enabled, 0 otherwise
+#
+# Once these are enabled, please leave them as-is. Write your own test;
+# we have tons of space.
+ALL_TESTS="0001:1:1"
+ALL_TESTS="$ALL_TESTS 0002:1:1"
+ALL_TESTS="$ALL_TESTS 0003:1:1"
+ALL_TESTS="$ALL_TESTS 0004:1:1"
+ALL_TESTS="$ALL_TESTS 0005:3:1"
+
+test_modprobe()
+{
+ if [ ! -d $DIR ]; then
+ echo "$0: $DIR not present" >&2
+ echo "You must have the following enabled in your kernel:" >&2
+ cat $TEST_DIR/config >&2
+ exit 1
+ fi
+}
+
+function allow_user_defaults()
+{
+ if [ -z $DIR ]; then
+ DIR="/sys/module/test_sysctl/"
+ fi
+ if [ -z $DEFAULT_NUM_TESTS ]; then
+ DEFAULT_NUM_TESTS=50
+ fi
+ if [ -z $SYSCTL ]; then
+ SYSCTL="/proc/sys/debug/test_sysctl"
+ fi
+ if [ -z $PROD_SYSCTL ]; then
+ PROD_SYSCTL="/proc/sys"
+ fi
+ if [ -z $WRITES_STRICT ]; then
+ WRITES_STRICT="${PROD_SYSCTL}/kernel/sysctl_writes_strict"
+ fi
+}
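
Since allow_user_defaults() only fills in variables the caller left unset, every knob can be overridden from the environment before the script runs. For example (paths here are just the defaults shown above, with the run count bumped):

    DEFAULT_NUM_TESTS=100 SYSCTL="/proc/sys/debug/test_sysctl" ./sysctl.sh -t 0001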
+
+function check_production_sysctl_writes_strict()
+{
+ echo -n "Checking production write strict setting ... "
+ if [ ! -e ${WRITES_STRICT} ]; then
+ echo "FAIL, but skip in case of old kernel" >&2
+ else
+ old_strict=$(cat ${WRITES_STRICT})
+ if [ "$old_strict" = "1" ]; then
+ echo "ok"
+ else
+ echo "FAIL, strict value is 0 but force to 1 to continue" >&2
+ echo "1" > ${WRITES_STRICT}
+ fi
+ fi
+
+ if [ -z $PAGE_SIZE ]; then
+ PAGE_SIZE=$(getconf PAGESIZE)
+ fi
+ if [ -z $MAX_DIGITS ]; then
+ MAX_DIGITS=$(($PAGE_SIZE/8))
+ fi
+ if [ -z $INT_MAX ]; then
+ INT_MAX=$(getconf INT_MAX)
+ fi
+ if [ -z $UINT_MAX ]; then
+ UINT_MAX=$(getconf UINT_MAX)
+ fi
+}
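
The limits probed later (MAX_DIGITS, INT_MAX, UINT_MAX) are derived at run time with getconf rather than hard-coded, so the tests track the host's page size and integer ABI. To see the values a given machine will use:

    getconf PAGESIZE                      # e.g. 4096
    echo $(( $(getconf PAGESIZE) / 8 ))   # MAX_DIGITS, e.g. 512
    getconf INT_MAX                       # 2147483647
    getconf UINT_MAX                      # 4294967295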
+
+test_reqs()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo "$0: must be run as root" >&2
+ exit 0
+ fi
+
+ if ! which perl 2> /dev/null > /dev/null; then
+ echo "$0: You need perl installed"
+ exit 1
+ fi
+ if ! which getconf 2> /dev/null > /dev/null; then
+ echo "$0: You need getconf installed"
+ exit 1
+ fi
+ if ! which diff 2> /dev/null > /dev/null; then
+ echo "$0: You need diff installed"
+ exit 1
+ fi
+}
+
+function load_req_mod()
+{
+ trap "test_modprobe" EXIT
+
+ if [ ! -d $DIR ]; then
+ modprobe $TEST_DRIVER
+ if [ $? -ne 0 ]; then
+ exit
+ fi
+ fi
+}
+
+reset_vals()
+{
+ VAL=""
+ TRIGGER=$(basename ${TARGET})
+ case "$TRIGGER" in
+ int_0001)
+ VAL="60"
+ ;;
+ int_0002)
+ VAL="1"
+ ;;
+ uint_0001)
+ VAL="314"
+ ;;
+ string_0001)
+ VAL="(none)"
+ ;;
+ *)
+ ;;
+ esac
+ echo -n $VAL > $TARGET
+}
+
+set_orig()
+{
+ if [ ! -z $TARGET ]; then
+ echo "${ORIG}" > "${TARGET}"
+ fi
+}
+
+set_test()
+{
+ echo "${TEST_STR}" > "${TARGET}"
+}
+
+verify()
+{
+ local seen
+ seen=$(cat "$1")
+ if [ "${seen}" != "${TEST_STR}" ]; then
+ return 1
+ fi
+ return 0
+}
+
+verify_diff_w()
+{
+ echo "$TEST_STR" | diff -q -w -u - $1
+ return $?
+}
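
verify_diff_w() compares with diff -w, so runs of whitespace are treated as equal; that matters for the array tests below, where values are echoed in space-separated but procfs may print them back tab-separated. A quick standalone check of the same invocation:

    echo "100 101 2 1" | diff -q -w -u - <(printf '100\t101\t2\t1\n')
    echo $?   # 0 - whitespace differences ignored, inputs considered identical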
+
+test_rc()
+{
+ if [[ $rc != 0 ]]; then
+ echo "Failed test, return value: $rc" >&2
+ exit $rc
+ fi
+}
+
+test_finish()
+{
+ set_orig
+ rm -f "${TEST_FILE}"
+
+ if [ ! -z ${old_strict} ]; then
+ echo ${old_strict} > ${WRITES_STRICT}
+ fi
+ exit $rc
+}
+
+run_numerictests()
+{
+ echo "== Testing sysctl behavior against ${TARGET} =="
+
+ rc=0
+
+ echo -n "Writing test file ... "
+ echo "${TEST_STR}" > "${TEST_FILE}"
+ if ! verify "${TEST_FILE}"; then
+ echo "FAIL" >&2
+ exit 1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Checking sysctl is not set to test value ... "
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ exit 1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Writing sysctl from shell ... "
+ set_test
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ exit 1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Resetting sysctl to original value ... "
+ set_orig
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ exit 1
+ else
+ echo "ok"
+ fi
+
+ # Now that we've validated the sanity of "set_test" and "set_orig",
+ # we can use those functions to set starting states before running
+ # specific behavioral tests.
+
+ echo -n "Writing entire sysctl in single write ... "
+ set_orig
+ dd if="${TEST_FILE}" of="${TARGET}" bs=4096 2>/dev/null
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Writing middle of sysctl after synchronized seek ... "
+ set_test
+ dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 skip=1 2>/dev/null
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Writing beyond end of sysctl ... "
+ set_orig
+ dd if="${TEST_FILE}" of="${TARGET}" bs=20 seek=2 2>/dev/null
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Writing sysctl with multiple long writes ... "
+ set_orig
+ (perl -e 'print "A" x 50;'; echo "${TEST_STR}") | \
+ dd of="${TARGET}" bs=50 2>/dev/null
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
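
The dd invocations above exercise procfs offset handling: seek= positions the write within the sysctl file and skip= positions the read within the input, so bs=1 seek=1 skip=1 rewrites everything except the first byte. The same mechanics on an ordinary file (plain shell, nothing sysctl-specific):

    printf 'XYZ' > src; printf 'abc' > dst
    dd if=src of=dst bs=1 seek=1 skip=1 2>/dev/null
    cat dst   # aYZ - byte 0 kept, the rest copied from src starting at offset 1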
+
+# Your test sysctl must accept the digits 3 and 4 to use this
+run_limit_digit()
+{
+ echo -n "Checking that spaces are ignored up to PAGE_SIZE on write ..."
+ reset_vals
+
+ LIMIT=$((MAX_DIGITS -1))
+ TEST_STR="3"
+ (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \
+ dd of="${TARGET}" 2>/dev/null
+
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Checking passing PAGE_SIZE of spaces fails on write ..."
+ reset_vals
+
+ LIMIT=$((MAX_DIGITS))
+ TEST_STR="4"
+ (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \
+ dd of="${TARGET}" 2>/dev/null
+
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
+
+# You are using an int
+run_limit_digit_int()
+{
+ echo -n "Testing INT_MAX works ..."
+ reset_vals
+ TEST_STR="$INT_MAX"
+ echo -n $TEST_STR > $TARGET
+
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing INT_MAX + 1 will fail as expected..."
+ reset_vals
+ let TEST_STR=$INT_MAX+1
+ echo -n $TEST_STR > $TARGET 2> /dev/null
+
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing negative values will work as expected..."
+ reset_vals
+ TEST_STR="-3"
+ echo -n $TEST_STR > $TARGET 2> /dev/null
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
+
+# You used an int array
+run_limit_digit_int_array()
+{
+ echo -n "Testing array works as expected ... "
+ TEST_STR="4 3 2 1"
+ echo -n $TEST_STR > $TARGET
+
+ if ! verify_diff_w "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing skipping trailing array elements works ... "
+ # Do not reset_vals, carry on the values from the last test.
+ # If we only echo in two digits the last two are left intact
+ TEST_STR="100 101"
+ echo -n $TEST_STR > $TARGET
+ # After we echo in, set TEST_STR to what we expect the result
+ # to be, so the diff check works.
+ TEST_STR="100 101 2 1"
+
+ if ! verify_diff_w "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing PAGE_SIZE limit on array works ... "
+ # Do not reset_vals, carry on the values from the last test.
+ # Even if you use an int array, you are still restricted to
+ # MAX_DIGITS; this is a known limitation. Test that the limit holds.
+ LIMIT=$((MAX_DIGITS -1))
+ TEST_STR="9"
+ (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \
+ dd of="${TARGET}" 2>/dev/null
+
+ TEST_STR="9 101 2 1"
+ if ! verify_diff_w "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing exceeding PAGE_SIZE limit fails as expected ... "
+ # Do not reset_vals, carry on the values from the last test.
+ # Now go over limit.
+ LIMIT=$((MAX_DIGITS))
+ TEST_STR="7"
+ (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \
+ dd of="${TARGET}" 2>/dev/null
+
+ TEST_STR="7 101 2 1"
+ if verify_diff_w "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
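
The sequence above relies on the array handler updating only as many elements as the write supplies. Condensed to its observable effect, using the int_0003 target that sysctl_test_0005 points at (output shown space-separated; the kernel may print tabs, which is why verify_diff_w() ignores whitespace):

    echo "4 3 2 1" > /proc/sys/debug/test_sysctl/int_0003
    echo "100 101" > /proc/sys/debug/test_sysctl/int_0003
    cat /proc/sys/debug/test_sysctl/int_0003   # 100 101 2 1 - trailing elements kept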
+
+# You are using an unsigned int
+run_limit_digit_uint()
+{
+ echo -n "Testing UINT_MAX works ..."
+ reset_vals
+ TEST_STR="$UINT_MAX"
+ echo -n $TEST_STR > $TARGET
+
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing UINT_MAX + 1 will fail as expected..."
+ reset_vals
+ TEST_STR=$(($UINT_MAX+1))
+ echo -n $TEST_STR > $TARGET 2> /dev/null
+
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+
+ echo -n "Testing negative values will not work as expected ..."
+ reset_vals
+ TEST_STR="-3"
+ echo -n $TEST_STR > $TARGET 2> /dev/null
+
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
+
+run_stringtests()
+{
+ echo -n "Writing entire sysctl in short writes ... "
+ set_orig
+ dd if="${TEST_FILE}" of="${TARGET}" bs=1 2>/dev/null
+ if ! verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Writing middle of sysctl after unsynchronized seek ... "
+ set_test
+ dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 2>/dev/null
+ if verify "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Checking sysctl maxlen is at least $MAXLEN ... "
+ set_orig
+ perl -e 'print "A" x ('"${MAXLEN}"'-2), "B";' | \
+ dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null
+ if ! grep -q B "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Checking sysctl keeps original string on overflow append ... "
+ set_orig
+ perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \
+ dd of="${TARGET}" bs=$(( MAXLEN - 1 )) 2>/dev/null
+ if grep -q B "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Checking sysctl stays NULL terminated on write ... "
+ set_orig
+ perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \
+ dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null
+ if grep -q B "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ echo -n "Checking sysctl stays NULL terminated on overwrite ... "
+ set_orig
+ perl -e 'print "A" x ('"${MAXLEN}"'-1), "BB";' | \
+ dd of="${TARGET}" bs=$(( $MAXLEN + 1 )) 2>/dev/null
+ if grep -q B "${TARGET}"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+
+ test_rc
+}
+
+sysctl_test_0001()
+{
+ TARGET="${SYSCTL}/int_0001"
+ reset_vals
+ ORIG=$(cat "${TARGET}")
+ TEST_STR=$(( $ORIG + 1 ))
+
+ run_numerictests
+ run_limit_digit
+}
+
+sysctl_test_0002()
+{
+ TARGET="${SYSCTL}/string_0001"
+ reset_vals
+ ORIG=$(cat "${TARGET}")
+ TEST_STR="Testing sysctl"
+ # Only string sysctls support seeking/appending.
+ MAXLEN=65
+
+ run_numerictests
+ run_stringtests
+}
+
+sysctl_test_0003()
+{
+ TARGET="${SYSCTL}/int_0002"
+ reset_vals
+ ORIG=$(cat "${TARGET}")
+ TEST_STR=$(( $ORIG + 1 ))
+
+ run_numerictests
+ run_limit_digit
+ run_limit_digit_int
+}
+
+sysctl_test_0004()
+{
+ TARGET="${SYSCTL}/uint_0001"
+ reset_vals
+ ORIG=$(cat "${TARGET}")
+ TEST_STR=$(( $ORIG + 1 ))
+
+ run_numerictests
+ run_limit_digit
+ run_limit_digit_uint
+}
+
+sysctl_test_0005()
+{
+ TARGET="${SYSCTL}/int_0003"
+ reset_vals
+ ORIG=$(cat "${TARGET}")
+
+ run_limit_digit_int_array
+}
+
+list_tests()
+{
+ echo "Test ID list:"
+ echo
+ echo "TEST_ID x NUM_TEST"
+ echo "TEST_ID: Test ID"
+ echo "NUM_TESTS: Number of recommended times to run the test"
+ echo
+ echo "0001 x $(get_test_count 0001) - tests proc_dointvec_minmax()"
+ echo "0002 x $(get_test_count 0002) - tests proc_dostring()"
+ echo "0003 x $(get_test_count 0003) - tests proc_dointvec()"
+ echo "0004 x $(get_test_count 0004) - tests proc_douintvec()"
+ echo "0005 x $(get_test_count 0005) - tests proc_douintvec() array"
+}
+
+test_reqs
+
+usage()
+{
+ NUM_TESTS=$(grep -o ' ' <<<"$ALL_TESTS" | grep -c .)
+ let NUM_TESTS=$NUM_TESTS+1
+ MAX_TEST=$(printf "%04d\n" $NUM_TESTS)
+ echo "Usage: $0 [ -t <4-number-digit> ] | [ -w <4-number-digit> ] |"
+ echo " [ -s <4-number-digit> ] | [ -c <4-number-digit> <test- count>"
+ echo " [ all ] [ -h | --help ] [ -l ]"
+ echo ""
+ echo "Valid tests: 0001-$MAX_TEST"
+ echo ""
+ echo " all Runs all tests (default)"
+ echo " -t Run test ID the number amount of times is recommended"
+ echo " -w Watch test ID run until it runs into an error"
+ echo " -c Run test ID once"
+ echo " -s Run test ID x test-count number of times"
+ echo " -l List all test ID list"
+ echo " -h|--help Help"
+ echo
+ echo "If an error every occurs execution will immediately terminate."
+ echo "If you are adding a new test try using -w <test-ID> first to"
+ echo "make sure the test passes a series of tests."
+ echo
+ echo Example uses:
+ echo
+ echo "$TEST_NAME.sh -- executes all tests"
+ echo "$TEST_NAME.sh -t 0002 -- Executes test ID 0002 number of times is recomended"
+ echo "$TEST_NAME.sh -w 0002 -- Watch test ID 0002 run until an error occurs"
+ echo "$TEST_NAME.sh -s 0002 -- Run test ID 0002 once"
+ echo "$TEST_NAME.sh -c 0002 3 -- Run test ID 0002 three times"
+ echo
+ list_tests
+ exit 1
+}
+
+function test_num()
+{
+ re='^[0-9]+$'
+ if ! [[ $1 =~ $re ]]; then
+ usage
+ fi
+}
+
+function get_test_count()
+{
+ test_num $1
+ TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ LAST_TWO=${TEST_DATA#*:*}
+ echo ${LAST_TWO%:*}
+}
+
+function get_test_enabled()
+{
+ test_num $1
+ TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ echo ${TEST_DATA#*:*:}
+}
+
+function run_all_tests()
+{
+ for i in $ALL_TESTS ; do
+ TEST_ID=${i%:*:*}
+ ENABLED=$(get_test_enabled $TEST_ID)
+ TEST_COUNT=$(get_test_count $TEST_ID)
+ if [[ $ENABLED -eq "1" ]]; then
+ test_case $TEST_ID $TEST_COUNT
+ fi
+ done
+}
+
+function watch_log()
+{
+ if [ $# -ne 3 ]; then
+ clear
+ fi
+ date
+ echo "Running test: $2 - run #$1"
+}
+
+function watch_case()
+{
+ i=0
+ while [ 1 ]; do
+
+ if [ $# -eq 1 ]; then
+ test_num $1
+ watch_log $i ${TEST_NAME}_test_$1
+ ${TEST_NAME}_test_$1
+ else
+ watch_log $i all
+ run_all_tests
+ fi
+ let i=$i+1
+ done
+}
+
+function test_case()
+{
+ NUM_TESTS=$DEFAULT_NUM_TESTS
+ if [ $# -eq 2 ]; then
+ NUM_TESTS=$2
+ fi
+
+ i=0
+ while [ $i -lt $NUM_TESTS ]; do
+ test_num $1
+ watch_log $i ${TEST_NAME}_test_$1 noclear
+ RUN_TEST=${TEST_NAME}_test_$1
+ $RUN_TEST
+ let i=$i+1
+ done
+}
+
+function parse_args()
+{
+ if [ $# -eq 0 ]; then
+ run_all_tests
+ else
+ if [[ "$1" = "all" ]]; then
+ run_all_tests
+ elif [[ "$1" = "-w" ]]; then
+ shift
+ watch_case $@
+ elif [[ "$1" = "-t" ]]; then
+ shift
+ test_num $1
+ test_case $1 $(get_test_count $1)
+ elif [[ "$1" = "-c" ]]; then
+ shift
+ test_num $1
+ test_num $2
+ test_case $1 $2
+ elif [[ "$1" = "-s" ]]; then
+ shift
+ test_case $1 1
+ elif [[ "$1" = "-l" ]]; then
+ list_tests
+ elif [[ "$1" = "-h" || "$1" = "--help" ]]; then
+ usage
+ else
+ usage
+ fi
+ fi
+}
+
+test_reqs
+allow_user_defaults
+check_production_sysctl_writes_strict
+load_req_mod
+
+trap "test_finish" EXIT
+
+parse_args "$@"
+
+exit 0
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 5801bbefbe89..a9b86133b9b3 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -9,7 +9,7 @@ TEST_GEN_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \
TEST_GEN_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \
skew_consistency clocksource-switch freq-step leap-a-day \
- leapcrash set-tai set-2038 set-tz
+ leapcrash set-tai set-2038 set-tz rtctest_setdate
include ../lib.mk
diff --git a/tools/testing/selftests/timers/rtctest.c b/tools/testing/selftests/timers/rtctest.c
index 4230d3052e5d..f61170f7b024 100644
--- a/tools/testing/selftests/timers/rtctest.c
+++ b/tools/testing/selftests/timers/rtctest.c
@@ -21,6 +21,9 @@
#include <stdlib.h>
#include <errno.h>
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
/*
* This expects the new RTC class driver framework, working with
@@ -29,23 +32,84 @@
*/
static const char default_rtc[] = "/dev/rtc0";
+static struct rtc_time cutoff_dates[] = {
+ {
+ .tm_year = 70, /* 1970 -1900 */
+ .tm_mday = 1,
+ },
+ /* signed time_t 19/01/2038 3:14:08 */
+ {
+ .tm_year = 138,
+ .tm_mday = 19,
+ },
+ {
+ .tm_year = 138,
+ .tm_mday = 20,
+ },
+ {
+ .tm_year = 199, /* 2099 -1900 */
+ .tm_mday = 1,
+ },
+ {
+ .tm_year = 200, /* 2100 -1900 */
+ .tm_mday = 1,
+ },
+ /* unsigned time_t 07/02/2106 7:28:15 */
+ {
+ .tm_year = 205,
+ .tm_mon = 1,
+ .tm_mday = 7,
+ },
+ {
+ .tm_year = 206,
+ .tm_mon = 1,
+ .tm_mday = 8,
+ },
+ /* signed 64-bit time in nanoseconds 12/04/2262 01:47:16 */
+ {
+ .tm_year = 362,
+ .tm_mon = 3,
+ .tm_mday = 12,
+ },
+ {
+ .tm_year = 362, /* 2262 -1900 */
+ .tm_mon = 3,
+ .tm_mday = 13,
+ },
+};
+
+static int compare_dates(struct rtc_time *a, struct rtc_time *b)
+{
+ if (a->tm_year != b->tm_year ||
+ a->tm_mon != b->tm_mon ||
+ a->tm_mday != b->tm_mday ||
+ a->tm_hour != b->tm_hour ||
+ a->tm_min != b->tm_min ||
+ ((b->tm_sec - a->tm_sec) > 1))
+ return 1;
+
+ return 0;
+}
int main(int argc, char **argv)
{
- int i, fd, retval, irqcount = 0;
+ int i, fd, retval, irqcount = 0, dangerous = 0;
unsigned long tmp, data;
struct rtc_time rtc_tm;
const char *rtc = default_rtc;
struct timeval start, end, diff;
switch (argc) {
+ case 3:
+ if (*argv[2] == 'd')
+ dangerous = 1;
+ /* FALLTHROUGH */
case 2:
rtc = argv[1];
/* FALLTHROUGH */
case 1:
break;
default:
- fprintf(stderr, "usage: rtctest [rtcdev]\n");
+ fprintf(stderr, "usage: rtctest [rtcdev] [d]\n");
return 1;
}
@@ -202,7 +266,7 @@ test_PIE:
/* not all RTCs support periodic IRQs */
if (errno == EINVAL) {
fprintf(stderr, "\nNo periodic IRQ support\n");
- goto done;
+ goto test_DATE;
}
perror("RTC_IRQP_READ ioctl");
exit(errno);
@@ -221,7 +285,7 @@ test_PIE:
if (errno == EINVAL) {
fprintf(stderr,
"\n...Periodic IRQ rate is fixed\n");
- goto done;
+ goto test_DATE;
}
perror("RTC_IRQP_SET ioctl");
exit(errno);
@@ -269,6 +333,62 @@ test_PIE:
}
}
+test_DATE:
+ if (!dangerous)
+ goto done;
+
+ fprintf(stderr, "\nTesting problematic dates\n");
+
+ for (i = 0; i < ARRAY_SIZE(cutoff_dates); i++) {
+ struct rtc_time current;
+
+ /* Write the new date in RTC */
+ retval = ioctl(fd, RTC_SET_TIME, &cutoff_dates[i]);
+ if (retval == -1) {
+ perror("RTC_SET_TIME ioctl");
+ close(fd);
+ exit(errno);
+ }
+
+ /* Read back */
+ retval = ioctl(fd, RTC_RD_TIME, &current);
+ if (retval == -1) {
+ perror("RTC_RD_TIME ioctl");
+ exit(errno);
+ }
+
+ if (compare_dates(&cutoff_dates[i], &current)) {
+ fprintf(stderr, "Setting date %d failed\n",
+ cutoff_dates[i].tm_year + 1900);
+ goto done;
+ }
+
+ cutoff_dates[i].tm_sec += 5;
+
+ /* Write the new alarm in RTC */
+ retval = ioctl(fd, RTC_ALM_SET, &cutoff_dates[i]);
+ if (retval == -1) {
+ perror("RTC_ALM_SET ioctl");
+ close(fd);
+ exit(errno);
+ }
+
+ /* Read back */
+ retval = ioctl(fd, RTC_ALM_READ, &current);
+ if (retval == -1) {
+ perror("RTC_ALM_READ ioctl");
+ exit(errno);
+ }
+
+ if (compare_dates(&cutoff_dates[i], &current)) {
+ fprintf(stderr, "Setting alarm %d failed\n",
+ cutoff_dates[i].tm_year + 1900);
+ goto done;
+ }
+
+ fprintf(stderr, "Setting year %d is OK \n",
+ cutoff_dates[i].tm_year + 1900);
+ }
done:
fprintf(stderr, "\n\n\t\t\t *** Test complete ***\n");
diff --git a/tools/testing/selftests/timers/rtctest_setdate.c b/tools/testing/selftests/timers/rtctest_setdate.c
new file mode 100644
index 000000000000..2cb78489eca4
--- /dev/null
+++ b/tools/testing/selftests/timers/rtctest_setdate.c
@@ -0,0 +1,86 @@
+/* Real Time Clock Driver Test
+ * by: Benjamin Gaignard (benjamin.gaignard@linaro.org)
+ *
+ * To build
+ * gcc rtctest_setdate.c -o rtctest_setdate
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <linux/rtc.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+
+static const char default_time[] = "00:00:00";
+
+int main(int argc, char **argv)
+{
+ int fd, retval;
+ struct rtc_time new, current;
+ const char *rtc, *date;
+ const char *time = default_time;
+
+ switch (argc) {
+ case 4:
+ time = argv[3];
+ /* FALLTHROUGH */
+ case 3:
+ date = argv[2];
+ rtc = argv[1];
+ break;
+ default:
+ fprintf(stderr, "usage: rtctest_setdate <rtcdev> <DD-MM-YYYY> [HH:MM:SS]\n");
+ return 1;
+ }
+
+ fd = open(rtc, O_RDONLY);
+ if (fd == -1) {
+ perror(rtc);
+ exit(errno);
+ }
+
+ sscanf(date, "%d-%d-%d", &new.tm_mday, &new.tm_mon, &new.tm_year);
+ new.tm_mon -= 1;
+ new.tm_year -= 1900;
+ sscanf(time, "%d:%d:%d", &new.tm_hour, &new.tm_min, &new.tm_sec);
+
+ fprintf(stderr, "Test will set RTC date/time to %d-%d-%d, %02d:%02d:%02d.\n",
+ new.tm_mday, new.tm_mon + 1, new.tm_year + 1900,
+ new.tm_hour, new.tm_min, new.tm_sec);
+
+ /* Write the new date in RTC */
+ retval = ioctl(fd, RTC_SET_TIME, &new);
+ if (retval == -1) {
+ perror("RTC_SET_TIME ioctl");
+ close(fd);
+ exit(errno);
+ }
+
+ /* Read back */
+ retval = ioctl(fd, RTC_RD_TIME, &current);
+ if (retval == -1) {
+ perror("RTC_RD_TIME ioctl");
+ exit(errno);
+ }
+
+ fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
+ current.tm_mday, current.tm_mon + 1, current.tm_year + 1900,
+ current.tm_hour, current.tm_min, current.tm_sec);
+
+ close(fd);
+ return 0;
+}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 9120edf3c94b..f2ac53ab8243 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -825,7 +825,7 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
if (ret < 0)
goto unlock_fail;
- kvm->buses[bus_idx]->ioeventfd_count++;
+ kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
list_add_tail(&p->list, &kvm->ioeventfds);
mutex_unlock(&kvm->slots_lock);
@@ -848,6 +848,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
{
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
+ struct kvm_io_bus *bus;
int ret = -ENOENT;
eventfd = eventfd_ctx_fdget(args->fd);
@@ -870,8 +871,9 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- if (kvm->buses[bus_idx])
- kvm->buses[bus_idx]->ioeventfd_count--;
+ bus = kvm_get_bus(kvm, bus_idx);
+ if (bus)
+ bus->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
break;
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 31e40c9e81df..b1286c4e0712 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -230,7 +230,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
}
mutex_lock(&kvm->irq_lock);
- old = kvm->irq_routing;
+ old = rcu_dereference_protected(kvm->irq_routing, 1);
rcu_assign_pointer(kvm->irq_routing, new);
kvm_irq_routing_update(kvm);
kvm_arch_irq_routing_update(kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 19f0ecb9b93e..82987d457b8b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -130,6 +130,12 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
static bool largepages_enabled = true;
+#define KVM_EVENT_CREATE_VM 0
+#define KVM_EVENT_DESTROY_VM 1
+static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
+static unsigned long long kvm_createvm_count;
+static unsigned long long kvm_active_vms;
+
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
@@ -187,12 +193,23 @@ static void ack_flush(void *_completed)
{
}
+static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+{
+ if (unlikely(!cpus))
+ cpus = cpu_online_mask;
+
+ if (cpumask_empty(cpus))
+ return false;
+
+ smp_call_function_many(cpus, ack_flush, NULL, wait);
+ return true;
+}
+
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
int i, cpu, me;
cpumask_var_t cpus;
- bool called = true;
- bool wait = req & KVM_REQUEST_WAIT;
+ bool called;
struct kvm_vcpu *vcpu;
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
@@ -207,14 +224,9 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
if (cpus != NULL && cpu != -1 && cpu != me &&
kvm_request_needs_ipi(vcpu, req))
- cpumask_set_cpu(cpu, cpus);
+ __cpumask_set_cpu(cpu, cpus);
}
- if (unlikely(cpus == NULL))
- smp_call_function_many(cpu_online_mask, ack_flush, NULL, wait);
- else if (!cpumask_empty(cpus))
- smp_call_function_many(cpus, ack_flush, NULL, wait);
- else
- called = false;
+ called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
free_cpumask_var(cpus);
return called;
@@ -293,7 +305,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- put_pid(vcpu->pid);
+ /*
+ * no need for rcu_read_lock as VCPU_RUN is the only place that
+ * will change the vcpu->pid pointer and on uninit all file
+ * descriptors are already gone.
+ */
+ put_pid(rcu_dereference_protected(vcpu->pid, 1));
kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
@@ -674,8 +691,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (init_srcu_struct(&kvm->irq_srcu))
goto out_err_no_irq_srcu;
for (i = 0; i < KVM_NR_BUSES; i++) {
- kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
- GFP_KERNEL);
+ rcu_assign_pointer(kvm->buses[i],
+ kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
if (!kvm->buses[i])
goto out_err;
}
@@ -700,9 +717,10 @@ out_err_no_srcu:
hardware_disable_all();
out_err_no_disable:
for (i = 0; i < KVM_NR_BUSES; i++)
- kfree(kvm->buses[i]);
+ kfree(rcu_access_pointer(kvm->buses[i]));
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- kvm_free_memslots(kvm, kvm->memslots[i]);
+ kvm_free_memslots(kvm,
+ rcu_dereference_protected(kvm->memslots[i], 1));
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
return ERR_PTR(r);
@@ -728,6 +746,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
int i;
struct mm_struct *mm = kvm->mm;
+ kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
kvm_destroy_vm_debugfs(kvm);
kvm_arch_sync_events(kvm);
spin_lock(&kvm_lock);
@@ -735,8 +754,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
- if (kvm->buses[i])
- kvm_io_bus_destroy(kvm->buses[i]);
+ struct kvm_io_bus *bus;
+
+ bus = rcu_dereference_protected(kvm->buses[i], 1);
+ if (bus)
+ kvm_io_bus_destroy(bus);
kvm->buses[i] = NULL;
}
kvm_coalesced_mmio_free(kvm);
@@ -748,7 +770,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- kvm_free_memslots(kvm, kvm->memslots[i]);
+ kvm_free_memslots(kvm,
+ rcu_dereference_protected(kvm->memslots[i], 1));
cleanup_srcu_struct(&kvm->irq_srcu);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
@@ -2551,13 +2574,14 @@ static long kvm_vcpu_ioctl(struct file *filp,
if (r)
return r;
switch (ioctl) {
- case KVM_RUN:
+ case KVM_RUN: {
+ struct pid *oldpid;
r = -EINVAL;
if (arg)
goto out;
- if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ oldpid = rcu_access_pointer(vcpu->pid);
+ if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
/* The thread running this VCPU changed. */
- struct pid *oldpid = vcpu->pid;
struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
rcu_assign_pointer(vcpu->pid, newpid);
@@ -2568,6 +2592,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
+ }
case KVM_GET_REGS: {
struct kvm_regs *kvm_regs;
@@ -3202,6 +3227,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
fput(file);
return -ENOMEM;
}
+ kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
fd_install(r, file);
return r;
@@ -3563,7 +3589,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
{
struct kvm_io_bus *new_bus, *bus;
- bus = kvm->buses[bus_idx];
+ bus = kvm_get_bus(kvm, bus_idx);
if (!bus)
return -ENOMEM;
@@ -3592,7 +3618,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
int i;
struct kvm_io_bus *new_bus, *bus;
- bus = kvm->buses[bus_idx];
+ bus = kvm_get_bus(kvm, bus_idx);
if (!bus)
return;
@@ -3854,6 +3880,67 @@ static const struct file_operations *stat_fops[] = {
[KVM_STAT_VM] = &vm_stat_fops,
};
+static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
+{
+ struct kobj_uevent_env *env;
+ char *tmp, *pathbuf = NULL;
+ unsigned long long created, active;
+
+ if (!kvm_dev.this_device || !kvm)
+ return;
+
+ spin_lock(&kvm_lock);
+ if (type == KVM_EVENT_CREATE_VM) {
+ kvm_createvm_count++;
+ kvm_active_vms++;
+ } else if (type == KVM_EVENT_DESTROY_VM) {
+ kvm_active_vms--;
+ }
+ created = kvm_createvm_count;
+ active = kvm_active_vms;
+ spin_unlock(&kvm_lock);
+
+ env = kzalloc(sizeof(*env), GFP_KERNEL);
+ if (!env)
+ return;
+
+ add_uevent_var(env, "CREATED=%llu", created);
+ add_uevent_var(env, "COUNT=%llu", active);
+
+ if (type == KVM_EVENT_CREATE_VM)
+ add_uevent_var(env, "EVENT=create");
+ else if (type == KVM_EVENT_DESTROY_VM)
+ add_uevent_var(env, "EVENT=destroy");
+
+ if (kvm->debugfs_dentry) {
+ char p[ITOA_MAX_LEN];
+
+ snprintf(p, sizeof(p), "%s", kvm->debugfs_dentry->d_name.name);
+ tmp = strchrnul(p + 1, '-');
+ *tmp = '\0';
+ add_uevent_var(env, "PID=%s", p);
+ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (pathbuf) {
+ /* sizeof counts the final '\0' */
+ int len = sizeof("STATS_PATH=") - 1;
+ const char *pvar = "STATS_PATH=";
+
+ tmp = dentry_path_raw(kvm->debugfs_dentry,
+ pathbuf + len,
+ PATH_MAX - len);
+ if (!IS_ERR(tmp)) {
+ memcpy(tmp - len, pvar, len);
+ env->envp[env->envp_idx++] = tmp - len;
+ }
+ }
+ }
+ /* no need for checks, since we are adding at most 5 keys */
+ env->envp[env->envp_idx++] = NULL;
+ kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
+ kfree(env);
+ kfree(pathbuf);
+}
+
static int kvm_init_debug(void)
{
int r = -EEXIST;
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 37d9118fd84b..d99850c462a1 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -51,6 +51,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
return vfio_group;
}
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep)
+{
+ bool ret, (*fn)(struct vfio_group *, struct file *);
+
+ fn = symbol_get(vfio_external_group_match_file);
+ if (!fn)
+ return false;
+
+ ret = fn(group, filep);
+
+ symbol_put(vfio_external_group_match_file);
+
+ return ret;
+}
+
static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
void (*fn)(struct vfio_group *);
@@ -231,37 +247,31 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
if (!f.file)
return -EBADF;
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
-
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
-
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group != vfio_group)
+ if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+ f.file))
continue;
list_del(&kvg->node);
+ kvm_arch_end_assignment(dev->kvm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ kvm_spapr_tce_release_vfio_group(dev->kvm,
+ kvg->vfio_group);
+#endif
+ kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
kvm_vfio_group_put_external_user(kvg->vfio_group);
kfree(kvg);
ret = 0;
break;
}
- kvm_arch_end_assignment(dev->kvm);
-
mutex_unlock(&kv->lock);
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
-#endif
- kvm_vfio_group_set_kvm(vfio_group, NULL);
-
- kvm_vfio_group_put_external_user(vfio_group);
+ fdput(f);
kvm_vfio_update_coherency(dev);